# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2011 Justin Santa Barbara
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""Utilities and helper functions."""


import contextlib
import datetime
import hashlib
import inspect
import os
import pyclbr
import re
import shutil
import stat
import sys
import tempfile
from xml.dom import minidom
from xml.parsers import expat
from xml import sax
from xml.sax import expatreader
from xml.sax import saxutils

from os_brick.initiator import connector
from oslo_concurrency import lockutils
from oslo_concurrency import processutils
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import encodeutils
from oslo_utils import importutils
from oslo_utils import timeutils
import retrying
import six

from cinder import exception
from cinder.i18n import _, _LE


CONF = cfg.CONF
LOG = logging.getLogger(__name__)
ISO_TIME_FORMAT = "%Y-%m-%dT%H:%M:%S"
PERFECT_TIME_FORMAT = "%Y-%m-%dT%H:%M:%S.%f"

synchronized = lockutils.synchronized_with_prefix('cinder-')


def find_config(config_path):
    """Find a configuration file using the given hint.

    :param config_path: Full or relative path to the config.
    :returns: Full path of the config, if it exists.
    :raises: `cinder.exception.ConfigNotFound`

    """
    possible_locations = [
        config_path,
        os.path.join(CONF.state_path, "etc", "cinder", config_path),
        os.path.join(CONF.state_path, "etc", config_path),
        os.path.join(CONF.state_path, config_path),
        "/etc/cinder/%s" % config_path,
    ]

    for path in possible_locations:
        if os.path.exists(path):
            return os.path.abspath(path)

    raise exception.ConfigNotFound(path=os.path.abspath(config_path))


def as_int(obj, quiet=True):
    # Try "2" -> 2
    try:
        return int(obj)
    except (ValueError, TypeError):
        pass
    # Try "2.5" -> 2
    try:
        return int(float(obj))
    except (ValueError, TypeError):
        pass
    # Eck, not sure what this is then.
    if not quiet:
        raise TypeError(_("Can not translate %s to integer.") % (obj))
    return obj


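# Illustrative behaviour of as_int():
#
#     >>> as_int("2")
#     2
#     >>> as_int("2.5")
#     2
#     >>> as_int("banana")            # quiet=True: the input is returned as-is
#     'banana'
#     >>> as_int("banana", quiet=False)
#     Traceback (most recent call last):
#         ...
#     TypeError: Can not translate banana to integer.

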
def is_int_like(val):
    """Check if a value looks like an int."""
    try:
        return str(int(val)) == str(val)
    except Exception:
        return False


def check_exclusive_options(**kwargs):
    """Checks that only one of the provided options is actually not-none.

    Iterates over all the kwargs passed in and checks that only one of
    them is not None; if more than one is not None, an exception is
    raised naming the offending arguments.
    """

    if not kwargs:
        return

    pretty_keys = kwargs.pop("pretty_keys", True)
    exclusive_options = {}
    for (k, v) in kwargs.items():
        if v is not None:
            exclusive_options[k] = True

    if len(exclusive_options) > 1:
        # Change the format of the names from pythonic to
        # something that is more readable.
        #
        # Ex: 'the_key' -> 'the key'
        if pretty_keys:
            names = [k.replace('_', ' ') for k in kwargs.keys()]
        else:
            names = kwargs.keys()
        names = ", ".join(sorted(names))
        msg = (_("May specify only one of %s") % (names))
        raise exception.InvalidInput(reason=msg)


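# Illustrative use of check_exclusive_options(); the keyword names here are
# arbitrary examples:
#
#     check_exclusive_options(snapshot_id='123', source_volid=None)   # passes
#     check_exclusive_options(snapshot_id='123', source_volid='456')
#     # raises InvalidInput:
#     #     "May specify only one of snapshot id, source volid"

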
def execute(*cmd, **kwargs):
    """Convenience wrapper around oslo's execute() method."""
    if 'run_as_root' in kwargs and 'root_helper' not in kwargs:
        kwargs['root_helper'] = get_root_helper()
    return processutils.execute(*cmd, **kwargs)


def check_ssh_injection(cmd_list):
    ssh_injection_pattern = ['`', '$', '|', '||', ';', '&', '&&', '>', '>>',
                             '<']

    # Check whether injection attacks exist
    for arg in cmd_list:
        arg = arg.strip()

        # Check for matching quotes on the ends
        is_quoted = re.match('^(?P<quote>[\'"])(?P<quoted>.*)(?P=quote)$', arg)
        if is_quoted:
            # Check for unescaped quotes within the quoted argument
            quoted = is_quoted.group('quoted')
            if quoted:
                if (re.match('[\'"]', quoted) or
                        re.search('[^\\\\][\'"]', quoted)):
                    raise exception.SSHInjectionThreat(command=cmd_list)
        else:
            # We only allow spaces within quoted arguments, and that
            # is the only special character allowed within quotes
            if len(arg.split()) > 1:
                raise exception.SSHInjectionThreat(command=cmd_list)

        # Second, check for dangerous (shell special) characters in the
        # argument; an occurrence is only allowed when it is escaped with
        # a backslash.
        for c in ssh_injection_pattern:
            if c not in arg:
                continue

            result = arg.find(c)
            if not result == -1:
                if result == 0 or not arg[result - 1] == '\\':
                    raise exception.SSHInjectionThreat(command=cmd_list)


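# Illustrative behaviour of check_ssh_injection(): each argument is checked
# separately, so shell metacharacters hidden inside an argument are rejected.
#
#     check_ssh_injection(['ls', '-l', '/tmp'])          # passes
#     check_ssh_injection(['ls', '/tmp; rm -rf /'])      # raises
#                                                        # SSHInjectionThreat

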
def create_channel(client, width, height):
    """Invoke an interactive shell session on server."""
    channel = client.invoke_shell()
    channel.resize_pty(width, height)
    return channel


def cinderdir():
    import cinder
    return os.path.abspath(cinder.__file__).split('cinder/__init__.py')[0]


def last_completed_audit_period(unit=None):
    """This method gives you the most recently *completed* audit period.

    arguments:
            unit: string, one of 'hour', 'day', 'month', 'year'
                Periods normally begin at the beginning (UTC) of the
                period unit (So a 'day' period begins at midnight UTC,
                a 'month' unit on the 1st, a 'year' on Jan, 1)
                unit string may be appended with an optional offset
                like so:  'day@18'  This will begin the period at 18:00
                UTC.  'month@15' starts a monthly period on the 15th,
                and year@3 begins a yearly one on March 1st.

    returns:  2 tuple of datetimes (begin, end)
              The begin timestamp of this audit period is the same as the
              end of the previous.
    """
    if not unit:
        unit = CONF.volume_usage_audit_period

    offset = 0
    if '@' in unit:
        unit, offset = unit.split("@", 1)
        offset = int(offset)

    rightnow = timeutils.utcnow()
    if unit not in ('month', 'day', 'year', 'hour'):
        raise ValueError('Time period must be hour, day, month or year')
    if unit == 'month':
        if offset == 0:
            offset = 1
        end = datetime.datetime(day=offset,
                                month=rightnow.month,
                                year=rightnow.year)
        if end >= rightnow:
            year = rightnow.year
            if 1 >= rightnow.month:
                year -= 1
                month = 12 + (rightnow.month - 1)
            else:
                month = rightnow.month - 1
            end = datetime.datetime(day=offset,
                                    month=month,
                                    year=year)
        year = end.year
        if 1 >= end.month:
            year -= 1
            month = 12 + (end.month - 1)
        else:
            month = end.month - 1
        begin = datetime.datetime(day=offset, month=month, year=year)

    elif unit == 'year':
        if offset == 0:
            offset = 1
        end = datetime.datetime(day=1, month=offset, year=rightnow.year)
        if end >= rightnow:
            end = datetime.datetime(day=1,
                                    month=offset,
                                    year=rightnow.year - 1)
            begin = datetime.datetime(day=1,
                                      month=offset,
                                      year=rightnow.year - 2)
        else:
            begin = datetime.datetime(day=1,
                                      month=offset,
                                      year=rightnow.year - 1)

    elif unit == 'day':
        end = datetime.datetime(hour=offset,
                                day=rightnow.day,
                                month=rightnow.month,
                                year=rightnow.year)
        if end >= rightnow:
            end = end - datetime.timedelta(days=1)
        begin = end - datetime.timedelta(days=1)

    elif unit == 'hour':
        end = rightnow.replace(minute=offset, second=0, microsecond=0)
        if end >= rightnow:
            end = end - datetime.timedelta(hours=1)
        begin = end - datetime.timedelta(hours=1)

    return (begin, end)


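# Illustrative results from last_completed_audit_period(), assuming the
# current time is 2015-06-10 14:30 UTC:
#
#     last_completed_audit_period('hour')
#     # (2015-06-10 13:00, 2015-06-10 14:00)
#     last_completed_audit_period('day@18')
#     # (2015-06-08 18:00, 2015-06-09 18:00)
#     last_completed_audit_period('month')
#     # (2015-05-01 00:00, 2015-06-01 00:00)

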
class ProtectedExpatParser(expatreader.ExpatParser):
    """An expat parser which disables DTD's and entities by default."""

    def __init__(self, forbid_dtd=True, forbid_entities=True,
                 *args, **kwargs):
        # Python 2.x old style class
        expatreader.ExpatParser.__init__(self, *args, **kwargs)
        self.forbid_dtd = forbid_dtd
        self.forbid_entities = forbid_entities

    def start_doctype_decl(self, name, sysid, pubid, has_internal_subset):
        raise ValueError("Inline DTD forbidden")

    def entity_decl(self, entityName, is_parameter_entity, value, base,
                    systemId, publicId, notationName):
        raise ValueError("<!ENTITY> forbidden")

    def unparsed_entity_decl(self, name, base, sysid, pubid, notation_name):
        # expat 1.2
        raise ValueError("<!ENTITY> forbidden")

    def reset(self):
        expatreader.ExpatParser.reset(self)
        if self.forbid_dtd:
            self._parser.StartDoctypeDeclHandler = self.start_doctype_decl
        if self.forbid_entities:
            self._parser.EntityDeclHandler = self.entity_decl
            self._parser.UnparsedEntityDeclHandler = self.unparsed_entity_decl


def safe_minidom_parse_string(xml_string):
    """Parse an XML string using minidom safely."""
    try:
        return minidom.parseString(xml_string, parser=ProtectedExpatParser())
    except sax.SAXParseException:
        raise expat.ExpatError()


def xhtml_escape(value):
    """Escapes a string so it is valid within XML or XHTML."""
    return saxutils.escape(value, {'"': '&quot;', "'": '&apos;'})


def get_from_path(items, path):
    """Returns a list of items matching the specified path.

    Takes an XPath-like expression e.g. prop1/prop2/prop3, and for each item
    in items, looks up items[prop1][prop2][prop3]. Like XPath, if any of the
    intermediate results are lists it will treat each list item individually.
    A 'None' in items or any child expressions will be ignored; this function
    will not throw because of None (anywhere) in items. The returned list
    will contain no None values.

    """
    if path is None:
        raise exception.Error('Invalid mini_xpath')

    (first_token, sep, remainder) = path.partition('/')

    if first_token == '':
        raise exception.Error('Invalid mini_xpath')

    results = []

    if items is None:
        return results

    if not isinstance(items, list):
        # Wrap single objects in a list
        items = [items]

    for item in items:
        if item is None:
            continue
        get_method = getattr(item, 'get', None)
        if get_method is None:
            continue
        child = get_method(first_token)
        if child is None:
            continue
        if isinstance(child, list):
            # Flatten intermediate lists
            for x in child:
                results.append(x)
        else:
            results.append(child)

    if not sep:
        # No more tokens
        return results
    else:
        return get_from_path(results, remainder)


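# Illustrative use of get_from_path():
#
#     >>> data = {'a': [{'b': 1}, {'b': 2}, {'c': 3}]}
#     >>> get_from_path(data, 'a/b')
#     [1, 2]

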
def is_valid_boolstr(val):
    """Check if the provided string is a valid bool string or not."""
    val = str(val).lower()
    return (val == 'true' or val == 'false' or
            val == 'yes' or val == 'no' or
            val == 'y' or val == 'n' or
            val == '1' or val == '0')


def is_none_string(val):
    """Check if a string represents a None value."""
    if not isinstance(val, six.string_types):
        return False

    return val.lower() == 'none'


def monkey_patch():
    """Patch decorators onto all functions in the configured modules.

    If CONF.monkey_patch is set to True, this function applies a decorator
    to all functions and class methods in the specified modules.

    You can set a decorator for each module using
    CONF.monkey_patch_modules.
    The format is "Module path:Decorator function".
    Example: 'cinder.api.ec2.cloud:
              cinder.openstack.common.notifier.api.notify_decorator'

    Parameters of the decorator are as follows.
    (See cinder.openstack.common.notifier.api.notify_decorator)

    name - name of the function
    function - object of the function
    """
    # If CONF.monkey_patch is not True, this function does nothing.
    if not CONF.monkey_patch:
        return
    # Get list of modules and decorators
    for module_and_decorator in CONF.monkey_patch_modules:
        module, decorator_name = module_and_decorator.split(':')
        # import decorator function
        decorator = importutils.import_class(decorator_name)
        __import__(module)
        # Retrieve module information using pyclbr
        module_data = pyclbr.readmodule_ex(module)
        for key in module_data.keys():
            # set the decorator for the class methods
            if isinstance(module_data[key], pyclbr.Class):
                clz = importutils.import_class("%s.%s" % (module, key))
                for method, func in inspect.getmembers(clz, inspect.ismethod):
                    setattr(
                        clz, method,
                        decorator("%s.%s.%s" % (module, key, method), func))
            # set the decorator for the function
            if isinstance(module_data[key], pyclbr.Function):
                func = importutils.import_class("%s.%s" % (module, key))
                setattr(sys.modules[module], key,
                        decorator("%s.%s" % (module, key), func))


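# Illustrative configuration consumed by monkey_patch(); the module and
# decorator paths below are placeholders:
#
#     [DEFAULT]
#     monkey_patch = True
#     monkey_patch_modules = cinder.api.v2.volumes:mypkg.notify_decorator

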
def make_dev_path(dev, partition=None, base='/dev'):
    """Return a path to a particular device.

    >>> make_dev_path('xvdc')
    /dev/xvdc

    >>> make_dev_path('xvdc', 1)
    /dev/xvdc1
    """
    path = os.path.join(base, dev)
    if partition:
        path += str(partition)
    return path


def sanitize_hostname(hostname):
    """Return a hostname which conforms to RFC-952 and RFC-1123 specs."""
    if six.PY3:
        hostname = hostname.encode('latin-1', 'ignore')
        hostname = hostname.decode('latin-1')
    else:
        if isinstance(hostname, six.text_type):
            hostname = hostname.encode('latin-1', 'ignore')

    hostname = re.sub('[ _]', '-', hostname)
    hostname = re.sub('[^\w.-]+', '', hostname)
    hostname = hostname.lower()
    hostname = hostname.strip('.-')

    return hostname


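# Illustrative behaviour of sanitize_hostname():
#
#     >>> sanitize_hostname('MY host_1!')
#     'my-host-1'

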
def hash_file(file_like_object):
    """Generate a hash for the contents of a file."""
    checksum = hashlib.sha1()
    any(map(checksum.update, iter(lambda: file_like_object.read(32768), b'')))
    return checksum.hexdigest()


def service_is_up(service):
    """Check whether a service is up based on last heartbeat."""
    last_heartbeat = service['updated_at'] or service['created_at']
    # Timestamps in DB are UTC.
    elapsed = (timeutils.utcnow() - last_heartbeat).total_seconds()
    return abs(elapsed) <= CONF.service_down_time


def read_file_as_root(file_path):
    """Secure helper to read file as root."""
    try:
        out, _err = execute('cat', file_path, run_as_root=True)
        return out
    except processutils.ProcessExecutionError:
        raise exception.FileNotFound(file_path=file_path)


@contextlib.contextmanager
def temporary_chown(path, owner_uid=None):
    """Temporarily chown a path.

    :param owner_uid: UID of temporary owner (defaults to current user)
    """
    if owner_uid is None:
        owner_uid = os.getuid()

    orig_uid = os.stat(path).st_uid

    if orig_uid != owner_uid:
        execute('chown', owner_uid, path, run_as_root=True)
    try:
        yield
    finally:
        if orig_uid != owner_uid:
            execute('chown', orig_uid, path, run_as_root=True)


@contextlib.contextmanager
def tempdir(**kwargs):
    tmpdir = tempfile.mkdtemp(**kwargs)
    try:
        yield tmpdir
    finally:
        try:
            shutil.rmtree(tmpdir)
        except OSError as e:
            LOG.debug('Could not remove tmpdir: %s',
                      six.text_type(e))


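# Illustrative use of the temporary_chown() and tempdir() context managers;
# the paths are placeholders:
#
#     with tempdir(prefix='cinder-') as tmp:
#         work_file = os.path.join(tmp, 'scratch.img')
#         # ... use work_file; the directory is removed on exit
#
#     with temporary_chown('/etc/cinder/secret.conf'):
#         # ... read or modify the file as the current user
#         pass   # ownership is restored afterwards

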
def walk_class_hierarchy(clazz, encountered=None):
    """Walk class hierarchy, yielding most derived classes first."""
    if not encountered:
        encountered = []
    for subclass in clazz.__subclasses__():
        if subclass not in encountered:
            encountered.append(subclass)
            # drill down to leaves first
            for subsubclass in walk_class_hierarchy(subclass, encountered):
                yield subsubclass
            yield subclass


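# Illustrative behaviour of walk_class_hierarchy():
#
#     class Base(object): pass
#     class Middle(Base): pass
#     class Leaf(Middle): pass
#
#     list(walk_class_hierarchy(Base))   # -> [Leaf, Middle]

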
def get_root_helper():
    return 'sudo cinder-rootwrap %s' % CONF.rootwrap_config


def brick_get_connector_properties(multipath=False, enforce_multipath=False):
    """Wrapper for brick calls that sets the root_helper needed by cinder.

    :param multipath: A boolean indicating whether the connector can
                      support multipath.
    :param enforce_multipath: If True, it raises exception when multipath=True
                              is specified but multipathd is not running.
                              If False, it falls back to multipath=False
                              when multipathd is not running.
    """

    root_helper = get_root_helper()
    return connector.get_connector_properties(root_helper,
                                               CONF.my_ip,
                                               multipath,
                                               enforce_multipath)


def brick_get_connector(protocol, driver=None,
                        execute=processutils.execute,
                        use_multipath=False,
                        device_scan_attempts=3,
                        *args, **kwargs):
    """Wrapper to get a brick connector object.

    This automatically populates the required protocol as well
    as the root_helper needed to execute commands.
    """

    root_helper = get_root_helper()
    return connector.InitiatorConnector.factory(protocol, root_helper,
                                                driver=driver,
                                                execute=execute,
                                                use_multipath=use_multipath,
                                                device_scan_attempts=
                                                device_scan_attempts,
                                                *args, **kwargs)


def require_driver_initialized(driver):
    """Verifies if `driver` is initialized.

    If the driver is not initialized, an exception will be raised.

    :param driver: The driver instance.
    :raises: `exception.DriverNotInitialized`
    """
    # we can't do anything if the driver didn't init
    if not driver.initialized:
        driver_name = driver.__class__.__name__
        LOG.error(_LE("Volume driver %s not initialized"), driver_name)
        raise exception.DriverNotInitialized()


def get_file_mode(path):
    """This primarily exists to make unit testing easier."""
    return stat.S_IMODE(os.stat(path).st_mode)


def get_file_gid(path):
    """This primarily exists to make unit testing easier."""
    return os.stat(path).st_gid


def get_file_size(path):
    """Returns the file size."""
    return os.stat(path).st_size


def _get_disk_of_partition(devpath, st=None):
    """Returns a disk device path from a partition device path, and stat for
    the device. If devpath is not a partition, devpath is returned as it is.
    For example, '/dev/sda' is returned for '/dev/sda1', and '/dev/disk1' is
    for '/dev/disk1p1' ('p' is prepended to the partition number if the disk
    name ends with numbers).
    """
    diskpath = re.sub('(?:(?<=\d)p)?\d+$', '', devpath)
    if diskpath != devpath:
        try:
            st_disk = os.stat(diskpath)
            if stat.S_ISBLK(st_disk.st_mode):
                return (diskpath, st_disk)
        except OSError:
            pass
    # devpath is not a partition
    if st is None:
        st = os.stat(devpath)
    return (devpath, st)


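# Illustrative behaviour of _get_disk_of_partition(), assuming the devices
# exist:
#
#     _get_disk_of_partition('/dev/sda1')     # -> ('/dev/sda', <stat result>)
#     _get_disk_of_partition('/dev/disk1p1')  # -> ('/dev/disk1', <stat result>)
#     _get_disk_of_partition('/dev/sda')      # -> ('/dev/sda', <stat result>)

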
def get_blkdev_major_minor(path, lookup_for_file=True):
    """Get the device's "major:minor" number of a block device to control
    I/O ratelimit of the specified path.

    If lookup_for_file is True and the path is a regular file, look up the
    disk device which the file lies on and return the result for that device.
    """
    st = os.stat(path)
    if stat.S_ISBLK(st.st_mode):
        path, st = _get_disk_of_partition(path, st)
        return '%d:%d' % (os.major(st.st_rdev), os.minor(st.st_rdev))
    elif stat.S_ISCHR(st.st_mode):
        # No I/O ratelimit control is provided for character devices
        return None
    elif lookup_for_file:
        # lookup the mounted disk which the file lies on
        out, _err = execute('df', path)
        devpath = out.split("\n")[1].split()[0]
        if devpath[0] != '/':
            # the file is on a network file system
            return None
        return get_blkdev_major_minor(devpath, False)
    else:
        msg = _("Unable to get a block device for file \'%s\'") % path
        raise exception.Error(msg)


def check_string_length(value, name, min_length=0, max_length=None):
    """Check the length of specified string.

    :param value: the value of the string
    :param name: the name of the string
    :param min_length: the min_length of the string
    :param max_length: the max_length of the string
    """
    if not isinstance(value, six.string_types):
        msg = _("%s is not a string or unicode") % name
        raise exception.InvalidInput(message=msg)

    if len(value) < min_length:
        msg = _("%(name)s has a minimum character requirement of "
                "%(min_length)s.") % {'name': name, 'min_length': min_length}
        raise exception.InvalidInput(message=msg)

    if max_length and len(value) > max_length:
        msg = _("%(name)s has more than %(max_length)s "
                "characters.") % {'name': name, 'max_length': max_length}
        raise exception.InvalidInput(message=msg)


_visible_admin_metadata_keys = ['readonly', 'attached_mode']


def add_visible_admin_metadata(volume):
    """Add user-visible admin metadata to regular metadata.

    Extracts the admin metadata keys that are to be made visible to
    non-administrators, and adds them to the regular metadata structure for
    the passed-in volume.
    """
    visible_admin_meta = {}

    if volume.get('volume_admin_metadata'):
        for item in volume['volume_admin_metadata']:
            if item['key'] in _visible_admin_metadata_keys:
                visible_admin_meta[item['key']] = item['value']
    # avoid circular ref when volume is a Volume instance
    elif (volume.get('admin_metadata') and
            isinstance(volume.get('admin_metadata'), dict)):
        for key in _visible_admin_metadata_keys:
            if key in volume['admin_metadata'].keys():
                visible_admin_meta[key] = volume['admin_metadata'][key]

    if not visible_admin_meta:
        return

    # NOTE(zhiyan): update visible administration metadata to
    # volume metadata, administration metadata will rewrite existing key.
    if volume.get('volume_metadata'):
        orig_meta = list(volume.get('volume_metadata'))
        for item in orig_meta:
            if item['key'] in visible_admin_meta.keys():
                item['value'] = visible_admin_meta.pop(item['key'])
        for key, value in visible_admin_meta.items():
            orig_meta.append({'key': key, 'value': value})
        volume['volume_metadata'] = orig_meta
    # avoid circular ref when vol is a Volume instance
    elif (volume.get('metadata') and
            isinstance(volume.get('metadata'), dict)):
        volume['metadata'].update(visible_admin_meta)
    else:
        volume['metadata'] = visible_admin_meta


def remove_invalid_filter_options(context, filters,
                                  allowed_search_options):
    """Remove search options that are not valid
    for non-admin API/context.
    """
    if context.is_admin:
        # Allow all options
        return
    # Otherwise, strip out all unknown options
    unknown_options = [opt for opt in filters
                       if opt not in allowed_search_options]
    bad_options = ", ".join(unknown_options)
    LOG.debug("Removing options '%s' from query.", bad_options)
    for opt in unknown_options:
        del filters[opt]


def is_blk_device(dev):
    try:
        if stat.S_ISBLK(os.stat(dev).st_mode):
            return True
        return False
    except Exception:
        LOG.debug('Path %s not found in is_blk_device check', dev)
        return False


def retry(exceptions, interval=1, retries=3, backoff_rate=2):

    def _retry_on_exception(e):
        return isinstance(e, exceptions)

    def _backoff_sleep(previous_attempt_number, delay_since_first_attempt_ms):
        exp = backoff_rate ** previous_attempt_number
        wait_for = max(0, interval * exp)
        LOG.debug("Sleeping for %s seconds", wait_for)
        return wait_for * 1000.0

    def _print_stop(previous_attempt_number, delay_since_first_attempt_ms):
        delay_since_first_attempt = delay_since_first_attempt_ms / 1000.0
        LOG.debug("Failed attempt %s", previous_attempt_number)
        LOG.debug("Have been at this for %s seconds",
                  delay_since_first_attempt)
        return previous_attempt_number == retries

    if retries < 1:
        raise ValueError('Retries must be greater than or '
                         'equal to 1 (received: %s). ' % retries)

    def _decorator(f):

        @six.wraps(f)
        def _wrapper(*args, **kwargs):
            r = retrying.Retrying(retry_on_exception=_retry_on_exception,
                                  wait_func=_backoff_sleep,
                                  stop_func=_print_stop)
            return r.call(f, *args, **kwargs)

        return _wrapper

    return _decorator


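# Illustrative use of the retry() decorator; the wrapped command is a
# placeholder. Each retry waits interval * backoff_rate ** attempt seconds.
#
#     @retry(processutils.ProcessExecutionError, interval=2, retries=5)
#     def _rescan_iscsi():
#         execute('iscsiadm', '-m', 'session', '--rescan', run_as_root=True)

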
def convert_version_to_int(version):
    try:
        if isinstance(version, six.string_types):
            version = convert_version_to_tuple(version)
        if isinstance(version, tuple):
            return six.moves.reduce(lambda x, y: (x * 1000) + y, version)
    except Exception:
        msg = _("Version %s is invalid.") % version
        raise exception.CinderException(msg)


def convert_version_to_str(version_int):
    version_numbers = []
    factor = 1000
    while version_int != 0:
        version_number = version_int - (version_int // factor * factor)
        version_numbers.insert(0, six.text_type(version_number))
        version_int = version_int // factor

    return '.'.join(map(str, version_numbers))


def convert_version_to_tuple(version_str):
    return tuple(int(part) for part in version_str.split('.'))


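# Illustrative round trip between the version helpers:
#
#     >>> convert_version_to_int('1.2.3')
#     1002003
#     >>> convert_version_to_str(1002003)
#     '1.2.3'
#     >>> convert_version_to_tuple('1.2.3')
#     (1, 2, 3)

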
def convert_str(text):
    """Convert to native string.

    Convert bytes and Unicode strings to native strings:

    * convert to bytes on Python 2:
      encode Unicode using encodeutils.safe_encode()
    * convert to Unicode on Python 3: decode bytes from UTF-8
    """
    if six.PY2:
        return encodeutils.safe_encode(text)
    else:
        if isinstance(text, bytes):
            return text.decode('utf-8')
        else:
            return text