2010-05-27 23:05:26 -07:00
|
|
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
|
2010-06-23 22:04:16 -07:00
|
|
|
|
|
|
|
# Copyright 2010 United States Government as represented by the
|
2010-06-23 23:15:06 -07:00
|
|
|
# Administrator of the National Aeronautics and Space Administration.
|
2011-02-23 12:05:49 -08:00
|
|
|
# Copyright 2011 Justin Santa Barbara
|
2010-06-23 22:04:16 -07:00
|
|
|
# All Rights Reserved.
|
|
|
|
#
|
|
|
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
|
|
|
# not use this file except in compliance with the License. You may obtain
|
|
|
|
# a copy of the License at
|
|
|
|
#
|
|
|
|
# http://www.apache.org/licenses/LICENSE-2.0
|
|
|
|
#
|
2010-05-27 23:05:26 -07:00
|
|
|
# Unless required by applicable law or agreed to in writing, software
|
2010-06-23 22:04:16 -07:00
|
|
|
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
|
|
|
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
|
|
|
# License for the specific language governing permissions and limitations
|
|
|
|
# under the License.
|
2010-05-27 23:05:26 -07:00
|
|
|
|
2011-04-20 12:08:22 -07:00
|
|
|
"""Utilities and helper functions."""
|
2010-05-27 23:05:26 -07:00
|
|
|
|
2011-10-10 17:58:56 -05:00
|
|
|
import contextlib
|
2010-08-16 14:16:21 +02:00
|
|
|
import datetime
|
2011-03-01 20:49:46 +01:00
|
|
|
import functools
|
2012-01-09 18:21:57 +11:00
|
|
|
import hashlib
|
2010-07-08 10:53:27 -07:00
|
|
|
import inspect
|
2012-01-05 15:37:18 +00:00
|
|
|
import itertools
|
2010-12-22 17:53:42 -08:00
|
|
|
import json
|
2010-07-07 12:24:24 -07:00
|
|
|
import os
|
2012-01-10 11:22:33 -06:00
|
|
|
import pyclbr
|
2010-05-27 23:05:26 -07:00
|
|
|
import random
|
2011-03-01 20:49:46 +01:00
|
|
|
import re
|
2011-08-05 14:33:12 +02:00
|
|
|
import shlex
|
2012-02-28 05:54:48 +00:00
|
|
|
import shutil
|
2010-07-14 16:27:18 -05:00
|
|
|
import socket
|
2010-11-23 21:48:32 +00:00
|
|
|
import struct
|
2010-07-08 10:53:27 -07:00
|
|
|
import sys
|
2012-02-28 05:54:48 +00:00
|
|
|
import tempfile
|
2012-03-16 13:25:05 -07:00
|
|
|
import threading
|
2010-12-15 11:23:33 -08:00
|
|
|
import time
|
2012-01-10 11:22:33 -06:00
|
|
|
import types
|
2011-06-15 20:11:34 +00:00
|
|
|
import uuid
|
2012-01-10 11:22:33 -06:00
|
|
|
import warnings
|
2010-10-25 19:21:09 +09:00
|
|
|
from xml.sax import saxutils
|
2010-05-27 23:05:26 -07:00
|
|
|
|
2012-03-16 13:25:05 -07:00
|
|
|
from eventlet import corolocal
|
2010-10-25 03:45:19 +09:00
|
|
|
from eventlet import event
|
|
|
|
from eventlet import greenthread
|
2011-03-22 10:35:43 +01:00
|
|
|
from eventlet import semaphore
|
2011-02-14 21:06:16 +01:00
|
|
|
from eventlet.green import subprocess
|
2012-02-15 16:48:50 +00:00
|
|
|
import iso8601
|
2012-02-24 09:56:26 -08:00
|
|
|
import lockfile
|
2011-12-29 10:55:42 -05:00
|
|
|
import netaddr
|
2011-04-20 12:08:22 -07:00
|
|
|
|
2010-07-28 12:53:27 -07:00
|
|
|
from nova import exception
|
2011-02-28 23:31:09 +01:00
|
|
|
from nova import flags
|
2011-01-04 00:23:35 -05:00
|
|
|
from nova import log as logging
|
2012-02-03 00:50:58 +00:00
|
|
|
from nova.openstack.common import cfg
|
2010-06-10 18:39:07 +01:00
|
|
|
|
2010-08-16 14:16:21 +02:00
|
|
|
|
2012-02-14 12:07:02 -06:00
|
|
|
# Module-level logger for this file.
LOG = logging.getLogger(__name__)

# Second-resolution ISO 8601 format used by isotime().
ISO_TIME_FORMAT = "%Y-%m-%dT%H:%M:%S"
# Microsecond-resolution format used by strtime()/parse_strtime().
PERFECT_TIME_FORMAT = "%Y-%m-%dT%H:%M:%S.%f"
FLAGS = flags.FLAGS
# Registered here because the process-lock helpers in this module honor it.
FLAGS.register_opt(
    cfg.BoolOpt('disable_process_locking', default=False,
                help='Whether to disable inter-process locks'))
|
|
|
|
|
|
|
|
|
2010-07-28 12:53:27 -07:00
|
|
|
def import_class(import_str):
    """Returns a class from a string including module and class.

    :param import_str: dotted path, e.g. 'package.module.ClassName'.
    :returns: the class object named by the final path component.
    :raises exception.ClassNotFound: when the module cannot be imported
        or the attribute is missing; the original exception is attached
        so callers can see the real cause (e.g. a missing dependency).
    """
    mod_str, _sep, class_str = import_str.rpartition('.')
    try:
        # __import__ returns the top-level package, so fetch the leaf
        # module from sys.modules instead.
        __import__(mod_str)
        return getattr(sys.modules[mod_str], class_str)
    except (ImportError, ValueError, AttributeError), exc:
        LOG.debug(_('Inner Exception: %s'), exc)
        raise exception.ClassNotFound(class_name=class_str, exception=exc)
|
2010-06-10 18:39:07 +01:00
|
|
|
|
2010-10-21 11:49:51 -07:00
|
|
|
|
2010-08-27 23:10:57 -07:00
|
|
|
def import_object(import_str):
    """Import and return a module, or an instance of a class.

    First treat ``import_str`` as a module path and return the module.
    If that import fails, treat it as ``module.ClassName`` and return a
    new instance of that class.
    """
    try:
        __import__(import_str)
    except ImportError:
        # Not a plain module path -- resolve it as a class and
        # instantiate it with no arguments.
        return import_class(import_str)()
    return sys.modules[import_str]
|
2010-08-16 14:16:21 +02:00
|
|
|
|
2010-10-21 11:49:51 -07:00
|
|
|
|
2011-12-10 14:01:17 -08:00
|
|
|
def find_config(config_path):
    """Find a configuration file using the given hint.

    :param config_path: Full or relative path to the config.
    :returns: Full path of the config, if it exists.
    :raises: `nova.exception.ConfigNotFound`

    """
    state = FLAGS.state_path
    candidates = (
        config_path,
        os.path.join(state, "etc", "nova", config_path),
        os.path.join(state, "etc", config_path),
        os.path.join(state, config_path),
        "/etc/nova/%s" % config_path,
    )

    for candidate in candidates:
        if os.path.exists(candidate):
            return os.path.abspath(candidate)

    raise exception.ConfigNotFound(path=os.path.abspath(config_path))
|
|
|
|
|
|
|
|
|
2010-11-23 21:48:32 +00:00
|
|
|
def vpn_ping(address, port, timeout=0.05, session_id=None):
    """Sends a vpn negotiation packet and returns the server session.

    Returns False on a failure. Basic packet structure is below.

    Client packet (14 bytes)::

         0 1      8 9  13
        +-+--------+-----+
        |x| cli_id |?????|
        +-+--------+-----+
        x = packet identifier 0x38
        cli_id = 64 bit identifier
        ? = unknown, probably flags/padding

    Server packet (26 bytes)::

         0 1      8 9  13 14    21 2225
        +-+--------+-----+--------+----+
        |x| srv_id |?????| cli_id |????|
        +-+--------+-----+--------+----+
        x = packet identifier 0x40
        cli_id = 64 bit identifier
        ? = unknown, probably flags/padding
        bit 9 was 1 and the rest were 0 in testing

    """
    if session_id is None:
        session_id = random.randint(0, 0xffffffffffffffff)
    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    data = struct.pack('!BQxxxxx', 0x38, session_id)
    sock.sendto(data, (address, port))
    sock.settimeout(timeout)
    try:
        received = sock.recv(2048)
    except socket.timeout:
        return False
    finally:
        sock.close()
    fmt = '!BQxxxxxQxxxx'
    if len(received) != struct.calcsize(fmt):
        # NOTE: was a bare debugging ``print`` statement; log it instead.
        LOG.debug(_('vpn_ping: expected %(want)d bytes, got %(got)d') %
                  {'want': struct.calcsize(fmt), 'got': len(received)})
        return False
    (identifier, server_sess, client_sess) = struct.unpack(fmt, received)
    if identifier == 0x40 and client_sess == session_id:
        return server_sess
    # Wrong identifier or session id: the original implicitly returned
    # None here, contradicting the documented "Returns False" contract.
    return False
|
|
|
|
|
|
|
|
|
2010-05-27 23:05:26 -07:00
|
|
|
def fetchfile(url, target):
    """Download *url* to the local path *target* via curl.

    Raises exception.ProcessExecutionError (via execute) when curl
    fails, since --fail makes curl exit non-zero on HTTP errors.
    """
    LOG.debug(_('Fetching %s') % url)
    execute('curl', '--fail', url, '-o', target)
|
2010-05-27 23:05:26 -07:00
|
|
|
|
|
|
|
|
2011-03-09 00:30:05 -05:00
|
|
|
def execute(*cmd, **kwargs):
    """Helper method to execute command with optional retry.

    If you add a run_as_root=True command, don't forget to add the
    corresponding filter to nova.rootwrap!

    :param cmd: Positional arguments passed to subprocess.Popen.
    :param process_input: Data to send to the opened process' stdin.
    :param check_exit_code: Single bool, int, or list of allowed exit
                            codes.  Defaults to [0].  Raise
                            exception.ProcessExecutionError unless the
                            program exits with one of these codes.  A
                            bool enables/disables checking entirely.
    :param delay_on_retry: True | False. Defaults to True. If set to
                           True, wait a short random amount of time
                           before retrying.
    :param attempts: How many times to try cmd.  Defaults to 1.
    :param run_as_root: True | False. Defaults to False. If set to True,
                        the command is prefixed by the command specified
                        in the root_helper FLAG.
    :param shell: Passed through to subprocess.Popen.

    :raises exception.Error: on receiving unknown keyword arguments.
    :raises exception.ProcessExecutionError: on a disallowed exit code
        once all attempts are exhausted.

    :returns: a tuple, (stdout, stderr) from the spawned process.
    """
    process_input = kwargs.pop('process_input', None)
    check_exit_code = kwargs.pop('check_exit_code', [0])
    ignore_exit_code = False
    # Normalize check_exit_code: a bool toggles checking entirely; a
    # bare int becomes a one-element list of allowed codes.
    if isinstance(check_exit_code, bool):
        ignore_exit_code = not check_exit_code
        check_exit_code = [0]
    elif isinstance(check_exit_code, int):
        check_exit_code = [check_exit_code]
    delay_on_retry = kwargs.pop('delay_on_retry', True)
    attempts = kwargs.pop('attempts', 1)
    run_as_root = kwargs.pop('run_as_root', False)
    shell = kwargs.pop('shell', False)

    # Anything left over is a typo or unsupported option -- fail loudly.
    if len(kwargs):
        raise exception.Error(_('Got unknown keyword args '
                                'to utils.execute: %r') % kwargs)

    if run_as_root:
        cmd = shlex.split(FLAGS.root_helper) + list(cmd)
    # Popen wants strings; callers may pass ints or other objects.
    cmd = map(str, cmd)

    while attempts > 0:
        attempts -= 1
        try:
            LOG.debug(_('Running cmd (subprocess): %s'), ' '.join(cmd))
            _PIPE = subprocess.PIPE  # pylint: disable=E1101
            obj = subprocess.Popen(cmd,
                                   stdin=_PIPE,
                                   stdout=_PIPE,
                                   stderr=_PIPE,
                                   close_fds=True,
                                   shell=shell)
            result = None
            if process_input is not None:
                result = obj.communicate(process_input)
            else:
                result = obj.communicate()
            obj.stdin.close()  # pylint: disable=E1101
            _returncode = obj.returncode  # pylint: disable=E1101
            if _returncode:
                LOG.debug(_('Result was %s') % _returncode)
                if not ignore_exit_code and _returncode not in check_exit_code:
                    (stdout, stderr) = result
                    raise exception.ProcessExecutionError(
                            exit_code=_returncode,
                            stdout=stdout,
                            stderr=stderr,
                            cmd=' '.join(cmd))
            return result
        except exception.ProcessExecutionError:
            if not attempts:
                raise
            else:
                LOG.debug(_('%r failed. Retrying.'), cmd)
                if delay_on_retry:
                    greenthread.sleep(random.randint(20, 200) / 100.0)
        finally:
            # NOTE(termie): this appears to be necessary to let the subprocess
            #               call clean something up in between calls, without
            #               it two execute calls in a row hangs the second one
            greenthread.sleep(0)
|
2010-05-27 23:05:26 -07:00
|
|
|
|
2010-06-10 18:39:07 +01:00
|
|
|
|
Bug#898257 abstract out disk image access methods
Rather than providing two mutually exlusive image
access methods (loop and qemu-nbd), try each in turn.
This is to prepare for a follow up patch which will
add libguestfs as a method to try.
* nova/virt/mount.py: A new Mount class to abstract the
devce allocation, partition mapping and file sys mounting,
for each access type.
* nova/virt/disk/loop.py: A specialization of the base Mount class
to provide loop back mounting support.
* nova/virt/disk/nbd.py: A specialization of the base Mount class
to provide qemu-nbd mounting support.
* nova/virt/disk/api.py: A new file containing the nova.virt.disk
module interface.
(img_handlers): A new list of access methods to try,
with the order being honored.
(_DiskImage): An internal helper class that uses the plugin classes
above, to provide the operations available on a disk image file.
When mounting, iterate over each access method until one succeeds.
If a hint is provided about a CoW format image, the list of
methods to try will be reduced accordingly.
Note expected errors are no longer raised as exceptions during mounting.
Instead, on failure to mount an image, errors are collated and raised.
Interveining errors are logged in debug mode for successful mounts.
* nova/virt/libvirt/connection.py: Adjust the function parameter
names to be more general, rather than referencing specific
implementations like 'nbd' and 'tune2fs'.
Simplify the destroy_container() by storing and passing
back a reference to the _DiskImage object, which has the
necessary state to unmount.
* nova/utils.py (trycmd): A helper function to both deal with,
commands that issue ignorable warnings to stderr,
and commands that EXIT_SUCCESS while issuing errors to stderr.
nova/virt/xenapi/vm_utils.py: Adjust for the moved virt.disk package
Change-Id: If3a4b1c8f4e2f2e7300a21071340dcc839cb36d7
2011-11-30 17:00:17 +00:00
|
|
|
def trycmd(*args, **kwargs):
    """
    A wrapper around execute() to more easily handle warnings and errors.

    Returns an (out, err) tuple of strings containing the output of
    the command's stdout and stderr.  If 'err' is not empty then the
    command can be considered to have failed.

    :discard_warnings   True | False. Defaults to False. If set to True,
                        then for succeeding commands, stderr is cleared

    """
    discard_warnings = kwargs.pop('discard_warnings', False)

    try:
        out, err = execute(*args, **kwargs)
        failed = False
    except exception.ProcessExecutionError, exn:
        # Report the failure through the err channel instead of raising.
        out, err = '', str(exn)
        LOG.debug(err)
        failed = True

    if not failed and discard_warnings and err:
        # Handle commands that output to stderr but otherwise succeed
        LOG.debug(err)
        err = ''

    return out, err
|
|
|
|
|
|
|
|
|
2011-02-04 13:07:17 -06:00
|
|
|
def ssh_execute(ssh, cmd, process_input=None,
                addl_env=None, check_exit_code=True):
    """Run *cmd* over an established SSH connection.

    :param ssh: a connected paramiko-style client exposing exec_command.
        (assumed from the exec_command/channel API -- TODO confirm)
    :param cmd: command string/sequence; joined with spaces for logging.
    :param process_input: unsupported; passing a value raises.
    :param addl_env: unsupported; passing a value raises.
    :param check_exit_code: raise ProcessExecutionError on non-zero exit.
    :returns: (stdout, stderr) tuple of the remote command's output.
    """
    LOG.debug(_('Running cmd (SSH): %s'), ' '.join(cmd))
    if addl_env:
        raise exception.Error(_('Environment not supported over SSH'))

    if process_input:
        # This is (probably) fixable if we need it...
        raise exception.Error(_('process_input not supported over SSH'))

    stdin_stream, stdout_stream, stderr_stream = ssh.exec_command(cmd)
    channel = stdout_stream.channel

    #stdin.write('process_input would go here')
    #stdin.flush()

    # NOTE(justinsb): This seems suspicious...
    # ...other SSH clients have buffering issues with this approach
    stdout = stdout_stream.read()
    stderr = stderr_stream.read()
    stdin_stream.close()

    exit_status = channel.recv_exit_status()

    # exit_status == -1 if no exit code was returned
    if exit_status != -1:
        LOG.debug(_('Result was %s') % exit_status)
        if check_exit_code and exit_status != 0:
            raise exception.ProcessExecutionError(exit_code=exit_status,
                                                  stdout=stdout,
                                                  stderr=stderr,
                                                  cmd=' '.join(cmd))

    return (stdout, stderr)
|
2010-05-27 23:05:26 -07:00
|
|
|
|
2010-06-10 18:39:07 +01:00
|
|
|
|
2010-11-01 16:25:56 -07:00
|
|
|
def novadir():
    """Return the directory containing the nova package checkout."""
    import nova
    # __file__ ends in 'nova/__init__.py'; stripping that suffix leaves
    # the repository/installation root.
    full_path = os.path.abspath(nova.__file__)
    return full_path.split('nova/__init__.py')[0]
|
2010-09-17 19:28:10 -07:00
|
|
|
|
|
|
|
|
2011-06-22 09:33:46 -04:00
|
|
|
def default_flagfile(filename='nova.conf', args=None):
    """Ensure a --flagfile argument is present in *args*.

    If any existing argument contains 'flagfile', return the path
    embedded in it.  Otherwise locate *filename* (relative to the
    calling script, then ./nova.conf, then /etc/nova/nova.conf), insert
    a --flagfile option into *args* (mutating it in place), and return
    the filename used.
    """
    if args is None:
        args = sys.argv
    for arg in args:
        if arg.find('flagfile') != -1:
            # Caller already supplied a flagfile; return the path part
            # (everything after 'flagfile' and its separator character).
            return arg[arg.index('flagfile') + len('flagfile') + 1:]
    else:
        if not os.path.isabs(filename):
            # turn relative filename into an absolute path, anchored at
            # the outermost frame of the call stack (the entry script).
            script_dir = os.path.dirname(inspect.stack()[-1][1])
            filename = os.path.abspath(os.path.join(script_dir, filename))
        if not os.path.exists(filename):
            filename = "./nova.conf"
            if not os.path.exists(filename):
                filename = '/etc/nova/nova.conf'
        if os.path.exists(filename):
            flagfile = '--flagfile=%s' % filename
            args.insert(1, flagfile)
        return filename
|
2010-05-27 23:05:26 -07:00
|
|
|
|
2010-06-10 18:39:07 +01:00
|
|
|
|
2010-05-27 23:05:26 -07:00
|
|
|
def debug(arg):
    """Log *arg* at debug level and return it unchanged.

    Useful as a pass-through tap inside callback chains.
    """
    LOG.debug(_('debug in callback: %s'), arg)
    return arg
|
|
|
|
|
2010-06-10 18:39:07 +01:00
|
|
|
|
2010-05-27 23:05:26 -07:00
|
|
|
def generate_uid(topic, size=8):
    """Return an id of the form ``<topic>-<size random chars>``."""
    alphabet = '01234567890abcdefghijklmnopqrstuvwxyz'
    suffix = ''.join(random.choice(alphabet) for _ in range(size))
    return '%s-%s' % (topic, suffix)
|
2010-05-27 23:05:26 -07:00
|
|
|
|
2010-06-10 18:39:07 +01:00
|
|
|
|
2011-03-15 11:24:07 -07:00
|
|
|
# Default symbols to use for passwords. Avoids visually confusing characters.
# Used as the default symbolgroups argument of generate_password().
# ~6 bits per symbol
DEFAULT_PASSWORD_SYMBOLS = ('23456789',  # Removed: 0,1
                            'ABCDEFGHJKLMNPQRSTUVWXYZ',  # Removed: I, O
                            'abcdefghijkmnopqrstuvwxyz')  # Removed: l


# Digits and uppercase only, for contexts where case entry is hard.
# ~5 bits per symbol
EASIER_PASSWORD_SYMBOLS = ('23456789',  # Removed: 0, 1
                           'ABCDEFGHJKLMNPQRSTUVWXYZ')  # Removed: I, O
|
2011-03-15 11:24:07 -07:00
|
|
|
|
|
|
|
|
2011-09-30 00:39:46 +00:00
|
|
|
def current_audit_period(unit=None):
    """This method gives you the most recently *completed* audit period.

    arguments:
            units: string, one of 'hour', 'day', 'month', 'year'
                Periods normally begin at the beginning (UTC) of the
                period unit (So a 'day' period begins at midnight UTC,
                a 'month' unit on the 1st, a 'year' on Jan, 1)
                unit string may be appended with an optional offset
                like so:  'day@18'  This will begin the period at 18:00
                UTC.  'month@15' starts a monthly period on the 15th,
                and year@3 begins a yearly one on March 1st.

    returns:  2 tuple of datetimes (begin, end)
              The begin timestamp of this audit period is the same as the
              end of the previous."""
    if not unit:
        unit = FLAGS.instance_usage_audit_period

    # Split an optional '@offset' suffix off the unit name.
    offset = 0
    if '@' in unit:
        unit, offset = unit.split("@", 1)
        offset = int(offset)

    rightnow = utcnow()
    if unit not in ('month', 'day', 'year', 'hour'):
        raise ValueError('Time period must be hour, day, month or year')
    if unit == 'month':
        # offset here is the day-of-month the period starts on; day 0 is
        # invalid for datetime, so default to the 1st.
        if offset == 0:
            offset = 1
        end = datetime.datetime(day=offset,
                                month=rightnow.month,
                                year=rightnow.year)
        if end >= rightnow:
            # This month's boundary hasn't passed yet; step back a month,
            # wrapping the year at January.
            year = rightnow.year
            if 1 >= rightnow.month:
                year -= 1
                month = 12 + (rightnow.month - 1)
            else:
                month = rightnow.month - 1
            end = datetime.datetime(day=offset,
                                    month=month,
                                    year=year)
        # begin is one month before end, with the same wrap logic.
        year = end.year
        if 1 >= end.month:
            year -= 1
            month = 12 + (end.month - 1)
        else:
            month = end.month - 1
        begin = datetime.datetime(day=offset, month=month, year=year)

    elif unit == 'year':
        # offset is the month the yearly period starts on.
        if offset == 0:
            offset = 1
        end = datetime.datetime(day=1, month=offset, year=rightnow.year)
        if end >= rightnow:
            # This year's boundary hasn't passed; use last year's period.
            end = datetime.datetime(day=1,
                                    month=offset,
                                    year=rightnow.year - 1)
            begin = datetime.datetime(day=1,
                                      month=offset,
                                      year=rightnow.year - 2)
        else:
            begin = datetime.datetime(day=1,
                                      month=offset,
                                      year=rightnow.year - 1)

    elif unit == 'day':
        # offset is the hour-of-day the daily period starts on.
        end = datetime.datetime(hour=offset,
                                day=rightnow.day,
                                month=rightnow.month,
                                year=rightnow.year)
        if end >= rightnow:
            end = end - datetime.timedelta(days=1)
        begin = end - datetime.timedelta(days=1)

    elif unit == 'hour':
        # offset is the minute-of-hour the hourly period starts on.
        end = rightnow.replace(minute=offset, second=0, microsecond=0)
        if end >= rightnow:
            end = end - datetime.timedelta(hours=1)
        begin = end - datetime.timedelta(hours=1)

    return (begin, end)
|
|
|
|
|
|
|
|
|
2012-01-04 11:10:10 -06:00
|
|
|
def usage_from_instance(instance_ref, network_info=None, **kw):
    """Build a usage-notification dict for an instance.

    :param instance_ref: mapping with instance fields (project_id,
        user_id, uuid, instance_type, memory_mb, root_gb, ...) --
        presumably a DB instance record; verify against callers.
    :param network_info: optional object exposing fixed_ips(); when
        given, its result is included under 'fixed_ips'.
    :param kw: extra key/values merged into (and overriding) the result.
    :returns: dict of usage fields suitable for notification payloads.
    """
    image_ref_url = "%s/images/%s" % (generate_glance_url(),
                                      instance_ref['image_ref'])

    usage_info = dict(
        tenant_id=instance_ref['project_id'],
        user_id=instance_ref['user_id'],
        instance_id=instance_ref['uuid'],
        instance_type=instance_ref['instance_type']['name'],
        instance_type_id=instance_ref['instance_type_id'],
        memory_mb=instance_ref['memory_mb'],
        disk_gb=instance_ref['root_gb'] + instance_ref['ephemeral_gb'],
        display_name=instance_ref['display_name'],
        created_at=str(instance_ref['created_at']),
        # Empty string (not None) when the instance never launched.
        launched_at=str(instance_ref['launched_at'])
            if instance_ref['launched_at'] else '',
        image_ref_url=image_ref_url,
        state=instance_ref['vm_state'],
        state_description=instance_ref['task_state']
            if instance_ref['task_state'] else '')

    if network_info is not None:
        usage_info['fixed_ips'] = network_info.fixed_ips()

    usage_info.update(kw)
    return usage_info
|
|
|
|
|
|
|
|
|
2011-12-29 17:23:27 +00:00
|
|
|
def generate_password(length=20, symbolgroups=DEFAULT_PASSWORD_SYMBOLS):
    """Generate a random password from the supplied symbol groups.

    At least one symbol from each group will be included. Unpredictable
    results if length is less than the number of symbol groups.

    Believed to be reasonably secure (with a reasonable password length!)

    """
    rng = random.SystemRandom()

    # NOTE(jerdfelt): Some password policies require at least one character
    # from each group of symbols, so seed the password with one random
    # character per group.
    chars = [rng.choice(group) for group in symbolgroups]
    # If length < len(symbolgroups), the leading characters would only come
    # from the first groups; shuffle before truncating so the kept subset
    # is unpredictable.
    rng.shuffle(chars)
    chars = chars[:length]
    remaining = length - len(chars)

    # Fill the rest of the password from the union of all groups.
    pool = ''.join(symbolgroups)
    chars.extend([rng.choice(pool) for _ in range(remaining)])

    # Shuffle once more so the per-group seed characters aren't clustered
    # at the front.
    rng.shuffle(chars)

    return ''.join(chars)
|
2011-02-17 22:09:26 +00:00
|
|
|
|
|
|
|
|
2010-05-27 23:05:26 -07:00
|
|
|
def last_octet(address):
    """Return the final dotted component of *address* as an int."""
    _head, _sep, tail = address.rpartition('.')
    return int(tail)
|
2010-05-27 23:05:26 -07:00
|
|
|
|
2010-06-10 18:39:07 +01:00
|
|
|
|
2011-09-30 00:39:46 +00:00
|
|
|
def get_my_linklocal(interface):
    """Return the IPv6 link-local address configured on *interface*.

    Shells out to ``ip -f inet6 -o addr show <interface>`` and scrapes
    the first 'scope link' inet6 address from its stdout.

    :raises exception.Error: when no link-local address is found or the
        underlying command fails.
    """
    try:
        if_str = execute('ip', '-f', 'inet6', '-o', 'addr', 'show', interface)
        condition = '\s+inet6\s+([0-9a-f:]+)/\d+\s+scope\s+link'
        links = [re.search(condition, x) for x in if_str[0].split('\n')]
        address = [w.group(1) for w in links if w is not None]
        if address[0] is not None:
            return address[0]
        else:
            raise exception.Error(_('Link Local address is not found.:%s')
                                  % if_str)
    except Exception as ex:
        # NOTE(review): an IndexError from address[0] on empty output is
        # also caught here and rewrapped as exception.Error.
        raise exception.Error(_("Couldn't get Link Local IP of %(interface)s"
                                " :%(ex)s") % locals())
|
2010-12-24 20:38:49 +09:00
|
|
|
|
|
|
|
|
2010-12-15 11:23:33 -08:00
|
|
|
def utcnow():
    """Overridable version of utils.utcnow."""
    override = utcnow.override_time
    if override:
        # Test code froze the clock via set_time_override().
        return override
    return datetime.datetime.utcnow()


# When set to a datetime, utcnow() returns it instead of the real time.
utcnow.override_time = None
|
|
|
|
|
|
|
|
|
2011-03-23 17:15:41 -05:00
|
|
|
def is_older_than(before, seconds):
    """Return True if *before* is more than *seconds* in the past."""
    age = utcnow() - before
    return age > datetime.timedelta(seconds=seconds)
|
2011-03-23 14:41:35 -05:00
|
|
|
|
|
|
|
|
2010-12-15 11:23:33 -08:00
|
|
|
def utcnow_ts():
    """Timestamp (seconds since the epoch) version of our utcnow."""
    now = utcnow()
    return time.mktime(now.timetuple())
|
|
|
|
|
|
|
|
|
|
|
|
def set_time_override(override_time=None):
    """Override utils.utcnow to return a constant time.

    :param override_time: datetime to freeze the clock at.  Defaults to
        the current UTC time at the moment of the call.  (The original
        default expression ``datetime.datetime.utcnow()`` was evaluated
        once at import time, silently freezing time at module load
        rather than at call time.)
    """
    if override_time is None:
        override_time = datetime.datetime.utcnow()
    utcnow.override_time = override_time
|
|
|
|
|
|
|
|
|
|
|
|
def advance_time_delta(timedelta):
    """Advance overriden time using a datetime.timedelta."""
    # Only meaningful while a time override is active.
    assert utcnow.override_time is not None
    utcnow.override_time += timedelta
|
|
|
|
|
|
|
|
|
|
|
|
def advance_time_seconds(seconds):
    """Advance overriden time by seconds."""
    delta = datetime.timedelta(seconds=seconds)
    advance_time_delta(delta)
|
|
|
|
|
|
|
|
|
|
|
|
def clear_time_override():
    """Remove the overridden time; utcnow() returns real time again."""
    utcnow.override_time = None
|
|
|
|
|
|
|
|
|
2011-07-21 22:46:36 +00:00
|
|
|
def strtime(at=None, fmt=PERFECT_TIME_FORMAT):
    """Format *at* (default: utcnow()) with *fmt* and return the string."""
    when = at or utcnow()
    return when.strftime(fmt)
|
|
|
|
|
|
|
|
|
|
|
|
def parse_strtime(timestr, fmt=PERFECT_TIME_FORMAT):
    """Turn a formatted time back into a datetime.

    Inverse of strtime(); raises ValueError (from strptime) on a string
    that does not match *fmt*.
    """
    return datetime.datetime.strptime(timestr, fmt)
|
|
|
|
|
|
|
|
|
|
|
|
def isotime(at=None):
    """Stringify time in ISO 8601 format.

    :param at: datetime to format; defaults to the current UTC time.
    :returns: e.g. '2012-03-16T13:25:05Z'.  Naive datetimes are treated
        as UTC and suffixed with 'Z'; aware datetimes whose tzname is
        not 'UTC' get that tzname appended instead.
    """
    if not at:
        at = datetime.datetime.utcnow()
    # NOTE: the original bound this to ``str``, shadowing the builtin.
    stamp = at.strftime(ISO_TIME_FORMAT)
    tz = at.tzinfo.tzname(None) if at.tzinfo else 'UTC'
    stamp += ('Z' if tz == 'UTC' else tz)
    return stamp
|
2010-07-26 17:00:50 -04:00
|
|
|
|
2010-08-16 14:16:21 +02:00
|
|
|
|
2010-07-26 17:00:50 -04:00
|
|
|
def parse_isotime(timestr):
    """Turn an iso formatted time back into a datetime."""
    try:
        return iso8601.parse_date(timestr)
    except (iso8601.ParseError, TypeError) as e:
        # Re-raise as ValueError so callers handle one exception type
        # for any malformed input.
        raise ValueError(e.message)
|
|
|
|
|
|
|
|
|
|
|
|
def normalize_time(timestamp):
    """Normalize time in arbitrary timezone to UTC"""
    offset = timestamp.utcoffset()
    if offset:
        # Aware datetime with a non-zero offset: shift to UTC and drop
        # the tzinfo so the result is naive-UTC.
        return timestamp.replace(tzinfo=None) - offset
    # Naive (or already-UTC) timestamps pass through unchanged.
    return timestamp
|
2010-08-19 12:28:45 +02:00
|
|
|
|
|
|
|
|
2010-11-11 19:52:36 -06:00
|
|
|
def parse_mailmap(mailmap='.mailmap'):
    """Parse a git .mailmap file into {alias_email: canonical_email}.

    Lines are '<canonical> <alias>'; '#' comment lines are skipped.
    Emails are lower-cased.  Returns {} when the file does not exist.
    """
    mapping = {}
    if os.path.exists(mailmap):
        # Context manager so the handle is always closed (the previous
        # code opened the file and never closed it).
        with open(mailmap, 'r') as fp:
            for line in fp:
                line = line.strip()
                if not line.startswith('#') and ' ' in line:
                    canonical_email, alias = line.split(' ')
                    mapping[alias.lower()] = canonical_email.lower()
    return mapping
|
|
|
|
|
2010-11-23 21:58:46 +01:00
|
|
|
|
2010-11-11 19:52:36 -06:00
|
|
|
def str_dict_replace(s, mapping):
    """Apply every (old -> new) substring replacement in mapping to s."""
    # items() instead of the Python-2-only iteritems(); behaviour identical.
    for old, new in mapping.items():
        s = s.replace(old, new)
    return s
|
|
|
|
|
2010-08-19 12:28:45 +02:00
|
|
|
|
|
|
|
class LazyPluggable(object):
    """A pluggable backend loaded lazily based on some value.

    Constructed with a flag name (``pivot``) and keyword arguments mapping
    backend name to an importable module path (or a ``(module, fromlist)``
    tuple).  The import is deferred until the first attribute access, at
    which point the backend selected by ``FLAGS[pivot]`` is imported and
    cached; all attribute access is then delegated to that module.
    """

    def __init__(self, pivot, **backends):
        # Double-underscore names are intentionally name-mangled so the
        # delegated backend's attribute names cannot collide with them.
        self.__backends = backends
        self.__pivot = pivot
        self.__backend = None  # cached backend module; imported on first use

    def __get_backend(self):
        if not self.__backend:
            # The flag value selects which registered backend to load.
            backend_name = FLAGS[self.__pivot]
            if backend_name not in self.__backends:
                raise exception.Error(_('Invalid backend: %s') % backend_name)

            backend = self.__backends[backend_name]
            if isinstance(backend, tuple):
                # Registered as (module name, fromlist).
                name = backend[0]
                fromlist = backend[1]
            else:
                name = backend
                fromlist = backend

            # A non-empty fromlist makes __import__ return the leaf module
            # rather than the top-level package.
            self.__backend = __import__(name, None, None, fromlist)
            LOG.debug(_('backend %s'), self.__backend)
        return self.__backend

    def __getattr__(self, key):
        # Delegate every unknown attribute to the lazily-imported backend.
        backend = self.__get_backend()
        return getattr(backend, key)
|
|
|
|
|
2010-10-21 11:49:51 -07:00
|
|
|
|
2011-01-06 21:37:33 -06:00
|
|
|
class LoopingCallDone(Exception):
    """Exception used to break out of, and stop, a LoopingCall.

    The poll-function passed to LoopingCall can raise this exception to
    break out of the loop normally. This is somewhat analogous to
    StopIteration.

    An optional return-value can be included as the argument to the exception;
    this return-value will be returned by LoopingCall.wait()
    """

    def __init__(self, retvalue=True):
        """:param retvalue: Value that LoopingCall.wait() should return."""
        self.retvalue = retvalue
|
|
|
|
|
|
|
|
|
2010-10-25 03:45:19 +09:00
|
|
|
class LoopingCall(object):
    """Repeatedly invoke a callable at a fixed interval on a greenthread.

    The callable can stop the loop cleanly by raising LoopingCallDone.
    """

    def __init__(self, f=None, *args, **kw):
        # f is invoked as f(*args, **kw) on every iteration.
        self.args = args
        self.kw = kw
        self.f = f
        self._running = False

    def start(self, interval, now=True):
        """Spawn the loop; returns an event fired when the loop ends.

        :param interval: seconds to sleep between invocations of f.
        :param now: when False, sleep one interval before the first call.
        """
        self._running = True
        done = event.Event()

        def _inner():
            if not now:
                greenthread.sleep(interval)
            try:
                while self._running:
                    self.f(*self.args, **self.kw)
                    # Re-check the flag: f() may have called stop().
                    if not self._running:
                        break
                    greenthread.sleep(interval)
            except LoopingCallDone, e:
                # Normal termination requested by the callable.
                self.stop()
                done.send(e.retvalue)
            except Exception:
                LOG.exception(_('in looping call'))
                # Propagate the failure to anyone blocked in wait().
                done.send_exception(*sys.exc_info())
                return
            else:
                done.send(True)

        self.done = done

        greenthread.spawn(_inner)
        return self.done

    def stop(self):
        # Makes the loop exit before its next iteration.
        self._running = False

    def wait(self):
        # Blocks until the loop finishes; returns the LoopingCallDone
        # retvalue (or True), or re-raises the loop's exception.
        return self.done.wait()
|
2010-10-25 19:21:09 +09:00
|
|
|
|
|
|
|
|
|
|
|
def xhtml_escape(value):
    """Escapes a string so it is valid within XML or XHTML.

    In addition to the default &, < and > escaping, both quote characters
    are escaped so the result is safe inside attribute values too.
    """
    # NOTE(review): the entity mapping here was garbled in this copy
    # (entities appeared already decoded); restored to the standard
    # XML character references.
    return saxutils.escape(value, {'"': '&quot;', "'": '&apos;'})
|
2010-10-25 19:21:09 +09:00
|
|
|
|
|
|
|
|
|
|
|
def utf8(value):
    """Try to turn a string into utf-8 if possible.

    Code is directly from the utf8 function in
    http://github.com/facebook/tornado/blob/master/tornado/escape.py

    """
    # Python 2 semantics: unicode objects are encoded to UTF-8 bytes;
    # byte strings are assumed to already be UTF-8 and passed through.
    if isinstance(value, unicode):
        return value.encode('utf-8')
    assert isinstance(value, str)
    return value
|
2010-12-22 17:53:42 -08:00
|
|
|
|
|
|
|
|
2011-07-29 12:09:17 -07:00
|
|
|
def to_primitive(value, convert_instances=False, level=0):
    """Convert a complex object into primitives.

    Handy for JSON serialization. We can optionally handle instances,
    but since this is a recursive function, we could have cyclical
    data structures.

    To handle cyclical data structures we could track the actual objects
    visited in a set, but not all objects are hashable. Instead we just
    track the depth of the object inspections and don't go too deep.

    Therefore, convert_instances=True is lossy ... be aware.

    """
    # Things that would make list()/iteration below misbehave: stringify
    # them instead of trying to walk them.
    nasty = [inspect.ismodule, inspect.isclass, inspect.ismethod,
             inspect.isfunction, inspect.isgeneratorfunction,
             inspect.isgenerator, inspect.istraceback, inspect.isframe,
             inspect.iscode, inspect.isbuiltin, inspect.isroutine,
             inspect.isabstract]
    for test in nasty:
        if test(value):
            return unicode(value)

    # value of itertools.count doesn't get caught by the inspect tests
    # above and results in infinite loop when list(value) is called.
    if type(value) == itertools.count:
        return unicode(value)

    # FIXME(vish): Workaround for LP bug 852095. Without this workaround,
    #              tests that raise an exception in a mocked method that
    #              has a @wrap_exception with a notifier will fail. If
    #              we up the dependency to 0.5.4 (when it is released) we
    #              can remove this workaround.
    if getattr(value, '__module__', None) == 'mox':
        return 'mock'

    # Recursion depth guard (see docstring re. cyclical structures).
    if level > 3:
        return '?'

    # The try block may not be necessary after the class check above,
    # but just in case ...
    try:
        if isinstance(value, (list, tuple)):
            o = []
            for v in value:
                o.append(to_primitive(v, convert_instances=convert_instances,
                                      level=level))
            return o
        elif isinstance(value, dict):
            o = {}
            for k, v in value.iteritems():
                o[k] = to_primitive(v, convert_instances=convert_instances,
                                    level=level)
            return o
        elif isinstance(value, datetime.datetime):
            return str(value)
        elif hasattr(value, 'iteritems'):
            return to_primitive(dict(value.iteritems()),
                                convert_instances=convert_instances,
                                level=level)
        elif hasattr(value, '__iter__'):
            # BUG FIX: this call previously passed ``level`` positionally,
            # where it landed in the ``convert_instances`` parameter.
            return to_primitive(list(value),
                                convert_instances=convert_instances,
                                level=level)
        elif convert_instances and hasattr(value, '__dict__'):
            # Likely an instance of something. Watch for cycles.
            # Ignore class member vars.
            return to_primitive(value.__dict__,
                                convert_instances=convert_instances,
                                level=level + 1)
        else:
            return value
    except TypeError:
        # Class objects are tricky since they may define something like
        # __iter__ defined but it isn't callable as list().
        return unicode(value)
|
2010-12-22 17:53:42 -08:00
|
|
|
|
|
|
|
|
|
|
|
def dumps(value):
    """JSON-serialize value, falling back to to_primitive for rich types."""
    try:
        return json.dumps(value)
    except TypeError:
        # Not directly serializable; reduce to primitives first.
        return json.dumps(to_primitive(value))
|
|
|
|
|
|
|
|
|
|
|
|
def loads(s):
    """Deserialize a JSON string (thin wrapper over json.loads)."""
    return json.loads(s)
|
2011-02-17 22:09:26 +00:00
|
|
|
|
|
|
|
|
2011-06-24 09:54:38 +04:00
|
|
|
# Register this module as a JSON implementation for anyjson, so code that
# goes through anyjson picks up the to_primitive-aware dumps/loads above.
try:
    import anyjson
except ImportError:
    # anyjson is optional; nothing to register when it is absent.
    pass
else:
    # Tuple layout per anyjson: (module, serialize name, serialize error,
    # deserialize name, deserialize error) -- presumably; verify against
    # the installed anyjson version.
    anyjson._modules.append(("nova.utils", "dumps", TypeError,
                             "loads", ValueError))
    anyjson.force_implementation("nova.utils")
|
|
|
|
|
|
|
|
|
2012-03-16 13:25:05 -07:00
|
|
|
class GreenLockFile(lockfile.FileLock):
    """Implementation of lockfile that allows for a lock per greenthread.

    Simply implements lockfile:LockBase init with an additional suffix
    on the unique name of the greenthread identifier
    """
    def __init__(self, path, threaded=True):
        # NOTE(review): mirrors lockfile.LockBase.__init__, but appends the
        # greenthread id so distinct greenthreads get distinct sentinel
        # files -- confirm against the installed lockfile version.
        self.path = path
        self.lock_file = os.path.abspath(path) + ".lock"
        self.hostname = socket.gethostname()
        self.pid = os.getpid()
        if threaded:
            t = threading.current_thread()
            # Thread objects in Python 2.4 and earlier do not have ident
            # attrs. Worm around that.
            ident = getattr(t, "ident", hash(t))
            # The greenthread identifier distinguishes coroutines running
            # inside the same OS thread.
            gident = corolocal.get_ident()
            self.tname = "-%x-%x" % (ident & 0xffffffff, gident & 0xffffffff)
        else:
            self.tname = ""
        dirname = os.path.dirname(self.lock_file)
        # unique_name is the per-owner sentinel file used by lockfile's
        # hard-link locking scheme.
        self.unique_name = os.path.join(dirname,
                                        "%s%s.%s" % (self.hostname,
                                                     self.tname,
                                                     self.pid))
|
|
|
|
|
|
|
|
|
2011-03-22 10:35:43 +01:00
|
|
|
# Map of lock name -> in-process semaphore, shared by all @synchronized
# users in this process.
_semaphores = {}


def synchronized(name, external=False):
    """Synchronization decorator.

    Decorating a method like so::

        @synchronized('mylock')
        def foo(self, *args):
           ...

    ensures that only one thread will execute the bar method at a time.

    Different methods can share the same lock::

        @synchronized('mylock')
        def foo(self, *args):
           ...

        @synchronized('mylock')
        def bar(self, *args):
           ...

    This way only one of either foo or bar can be executing at a time.

    The external keyword argument denotes whether this lock should work across
    multiple processes. This means that if two different workers both run a
    a method decorated with @synchronized('mylock', external=True), only one
    of them will execute at a time.

    Important limitation: you can only have one external lock running per
    thread at a time. For example the following will fail:

        @utils.synchronized('testlock1', external=True)
        def outer_lock():

            @utils.synchronized('testlock2', external=True)
            def inner_lock():
                pass
            inner_lock()

        outer_lock()

    """

    def wrap(f):
        @functools.wraps(f)
        def inner(*args, **kwargs):
            # NOTE(soren): If we ever go natively threaded, this will be racy.
            #              See http://stackoverflow.com/questions/5390569/dyn
            #              amically-allocating-and-destroying-mutexes
            if name not in _semaphores:
                _semaphores[name] = semaphore.Semaphore()
            sem = _semaphores[name]
            LOG.debug(_('Attempting to grab semaphore "%(lock)s" for method '
                        '"%(method)s"...') % {'lock': name,
                                              'method': f.__name__})
            with sem:
                LOG.debug(_('Got semaphore "%(lock)s" for method '
                            '"%(method)s"...') % {'lock': name,
                                                  'method': f.__name__})
                # The file lock is taken *inside* the semaphore so the
                # inter-process lock is only contended by one greenthread
                # per process at a time.
                if external and not FLAGS.disable_process_locking:
                    LOG.debug(_('Attempting to grab file lock "%(lock)s" for '
                                'method "%(method)s"...') %
                              {'lock': name, 'method': f.__name__})
                    lock_file_path = os.path.join(FLAGS.lock_path,
                                                  'nova-%s' % name)
                    lock = GreenLockFile(lock_file_path)
                    with lock:
                        LOG.debug(_('Got file lock "%(lock)s" for '
                                    'method "%(method)s"...') %
                                  {'lock': name, 'method': f.__name__})
                        retval = f(*args, **kwargs)
                else:
                    retval = f(*args, **kwargs)

            # If no-one else is waiting for it, delete it.
            # See note about possible raciness above.
            if not sem.balance < 1:
                del _semaphores[name]

            return retval
        return inner
    return wrap
|
2011-02-23 12:05:49 -08:00
|
|
|
|
|
|
|
|
2012-02-24 09:56:26 -08:00
|
|
|
def cleanup_file_locks():
    """clean up stale locks left behind by process failures

    The lockfile module, used by @synchronized, can leave stale lockfiles
    behind after process failure. These locks can cause process hangs
    at startup, when a process deadlocks on a lock which will never
    be unlocked.

    Intended to be called at service startup.

    """

    # NOTE(mikeyp) this routine incorporates some internal knowledge
    #              from the lockfile module, and this logic really
    #              should be part of that module.
    #
    # cleanup logic:
    # 1) look for the lockfile modules's 'sentinel' files, of the form
    #    hostname.[thread-.*]-pid, extract the pid.
    #    if pid doesn't match a running process, delete the file since
    #    it's from a dead process.
    # 2) check for the actual lockfiles. if lockfile exists with linkcount
    #    of 1, it's bogus, so delete it. A link count >= 2 indicates that
    #    there are probably sentinels still linked to it from active
    #    processes.  This check isn't perfect, but there is no way to
    #    reliably tell which sentinels refer to which lock in the
    #    lockfile implementation.

    if FLAGS.disable_process_locking:
        return

    hostname = socket.gethostname()
    sentinel_re = hostname + r'\..*-(\d+$)'
    lockfile_re = r'nova-.*\.lock'
    files = os.listdir(FLAGS.lock_path)

    # cleanup sentinels
    for filename in files:
        match = re.match(sentinel_re, filename)
        if match is None:
            continue
        pid = match.group(1)
        LOG.debug(_('Found sentinel %(filename)s for pid %(pid)s') %
                  {'filename': filename, 'pid': pid})
        try:
            # Signal 0 merely probes for process existence.
            os.kill(int(pid), 0)
        except OSError, e:
            # PID wasn't found
            delete_if_exists(os.path.join(FLAGS.lock_path, filename))
            LOG.debug(_('Cleaned sentinel %(filename)s for pid %(pid)s') %
                      {'filename': filename, 'pid': pid})

    # cleanup lock files
    for filename in files:
        match = re.match(lockfile_re, filename)
        if match is None:
            continue
        try:
            stat_info = os.stat(os.path.join(FLAGS.lock_path, filename))
        except OSError as (errno, strerror):
            if errno == 2:  # doesn't exist
                continue
            else:
                raise
        msg = (_('Found lockfile %(file)s with link count %(count)d') %
               {'file': filename, 'count': stat_info.st_nlink})
        LOG.debug(msg)
        # st_nlink == 1 means no sentinel is hard-linked to the lock, so
        # no live owner can exist (see NOTE above).
        if stat_info.st_nlink == 1:
            delete_if_exists(os.path.join(FLAGS.lock_path, filename))
            msg = (_('Cleaned lockfile %(file)s with link count %(count)d') %
                   {'file': filename, 'count': stat_info.st_nlink})
            LOG.debug(msg)
|
|
|
|
|
|
|
|
|
|
|
|
def delete_if_exists(pathname):
    """Delete a file, but ignore file not found error."""
    try:
        os.unlink(pathname)
    except OSError as e:
        # Replaces the Python-2-only ``except OSError as (errno, strerror)``
        # tuple unpacking; behaviour is identical.
        if e.errno == 2:  # doesn't exist (ENOENT)
            return
        raise
|
|
|
|
|
|
|
|
|
2011-02-23 14:07:08 -08:00
|
|
|
def get_from_path(items, path):
    """Returns a list of items matching the specified path.

    Takes an XPath-like expression e.g. prop1/prop2/prop3, and for each item
    in items, looks up items[prop1][prop2][prop3].  Like XPath, if any of the
    intermediate results are lists it will treat each list item individually.
    A 'None' in items or any child expressions will be ignored, this function
    will not throw because of None (anywhere) in items.  The returned list
    will contain no None values.

    """
    if path is None:
        raise exception.Error('Invalid mini_xpath')

    first_token, sep, remainder = path.partition('/')
    if not first_token:
        raise exception.Error('Invalid mini_xpath')

    if items is None:
        return []

    if not isinstance(items, list):
        # Treat a bare object as a one-element list.
        items = [items]

    matches = []
    for item in items:
        if item is None:
            continue
        getter = getattr(item, 'get', None)
        if getter is None:
            # Not dict-like; nothing to look up on it.
            continue
        child = getter(first_token)
        if child is None:
            continue
        if isinstance(child, list):
            # Flatten intermediate lists.
            matches.extend(child)
        else:
            matches.append(child)

    if sep:
        # More tokens remain: descend into the collected children.
        return get_from_path(matches, remainder)
    return matches
|
2011-03-22 20:26:45 +00:00
|
|
|
|
|
|
|
|
|
|
|
def flatten_dict(dict_, flattened=None):
    """Recursively flatten a nested dictionary into a single-level dict.

    On key collisions the later/deeper value wins.

    :param flattened: optional dict to accumulate into (also returned).
    """
    flattened = flattened or {}
    # items()/isinstance(dict) replace the Python-2-only iteritems()
    # iteration and hasattr-based dict detection.
    for key, value in dict_.items():
        if isinstance(value, dict):
            flatten_dict(value, flattened)
        else:
            flattened[key] = value
    return flattened
|
|
|
|
|
|
|
|
|
|
|
|
def partition_dict(dict_, keys):
    """Return two dicts, one with `keys` the other with everything else."""
    intersection = {}
    difference = {}
    # items() rather than the Python-2-only iteritems(); behaviour identical.
    for key, value in dict_.items():
        if key in keys:
            intersection[key] = value
        else:
            difference[key] = value
    return intersection, difference
|
|
|
|
|
|
|
|
|
|
|
|
def map_dict_keys(dict_, key_map):
    """Return a dict in which the dictionary's keys are mapped to new keys."""
    mapped = {}
    # items() rather than Python-2-only iteritems(); dict.get(key, key)
    # replaces the conditional-expression lookup -- same behaviour.
    for key, value in dict_.items():
        mapped[key_map.get(key, key)] = value
    return mapped
|
2011-03-23 05:50:53 +00:00
|
|
|
|
|
|
|
|
|
|
|
def subset_dict(dict_, keys):
    """Return a dict that only contains a subset of keys."""
    wanted, _unwanted = partition_dict(dict_, keys)
    return wanted
|
2011-03-24 21:13:55 +00:00
|
|
|
|
|
|
|
|
2011-03-22 21:42:17 -07:00
|
|
|
def check_isinstance(obj, cls):
    """Checks that obj is of type cls, and lets PyLint infer types."""
    if isinstance(obj, cls):
        return obj
    raise Exception(_('Expected object of type: %s') % (str(cls)))
    # TODO(justinsb): Can we make this better??
    # Unreachable at runtime; kept so static analysis infers the
    # return type as cls.
    return cls()  # Ugly PyLint hack
|
2011-04-22 21:35:54 +09:00
|
|
|
|
|
|
|
|
|
|
|
def parse_server_string(server_str):
    """
    Parses the given server_string and returns a list of host and port.
    If it's not a combination of host part and port, the port element
    is a null string. If the input is invalid expression, return a null
    list.
    """
    try:
        # First of all, exclude pure IPv6 address (w/o port).
        if netaddr.valid_ipv6(server_str):
            return (server_str, '')

        # Next, check if this is IPv6 address with a port number combination.
        if "]:" in server_str:
            address, port = server_str.replace('[', '', 1).split(']:')
            return (address, port)

        # Third, check if this is a combination of an address and a port
        if ':' not in server_str:
            return (server_str, '')

        # This must be a combination of an address and a port
        address, port = server_str.split(':')
        return (address, port)

    except Exception:
        LOG.debug(_('Invalid server_string: %s') % server_str)
        return ('', '')
|
2011-06-15 20:11:34 +00:00
|
|
|
|
|
|
|
|
|
|
|
def gen_uuid():
    """Return a freshly generated random (version 4) UUID."""
    return uuid.uuid4()
|
2011-06-15 21:12:37 +00:00
|
|
|
|
|
|
|
|
|
|
|
def is_uuid_like(val):
    """For our purposes, a UUID is a string in canonical form:

        aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa
    """
    try:
        uuid.UUID(val)
    except (TypeError, ValueError, AttributeError):
        # Not parseable as a UUID (wrong type or malformed string).
        return False
    return True
|
2011-06-23 21:31:00 -04:00
|
|
|
|
|
|
|
|
2011-06-29 15:22:56 -07:00
|
|
|
def bool_from_str(val):
    """Convert a string representation of a bool into a bool value"""
    if not val:
        return False
    try:
        # Numeric strings: any non-zero integer is truthy.
        return bool(int(val))
    except ValueError:
        # Otherwise only the literal (case-insensitive) 'true' counts.
        return val.lower() == 'true'
|
|
|
|
|
|
|
|
|
2011-07-11 13:34:39 -07:00
|
|
|
def is_valid_ipv4(address):
    """valid the address strictly as per format xxx.xxx.xxx.xxx.
    where xxx is a value between 0 and 255.
    """
    octets = address.split(".")
    if len(octets) != 4:
        return False
    for octet in octets:
        try:
            value = int(octet)
        except ValueError:
            # Non-numeric component.
            return False
        if value < 0 or value > 255:
            return False
    return True
|
2011-08-23 08:46:55 -07:00
|
|
|
|
|
|
|
|
2011-09-30 15:10:33 +01:00
|
|
|
def is_valid_cidr(address):
    """Check if the provided ipv4 or ipv6 address is a valid
    CIDR address or not"""
    try:
        # Validate the correct CIDR Address
        netaddr.IPNetwork(address)
    except netaddr.core.AddrFormatError:
        return False

    # netaddr accepts a bare address with no '/xx' prefix; require an
    # explicit, non-empty prefix part here.
    parts = address.split('/')
    if len(parts) <= 1 or parts[1] == '':
        return False

    return True
|
|
|
|
|
|
|
|
|
2011-08-19 12:30:55 -07:00
|
|
|
def monkey_patch():
    """If FLAGS.monkey_patch is set to True, this function patches a
    decorator onto all functions in the specified modules.

    You can set decorators for each module
    using FLAGS.monkey_patch_modules.
    The format is "Module path:Decorator function".
    Example: 'nova.api.ec2.cloud:nova.notifier.api.notify_decorator'

    Parameters of the decorator is as follows.
    (See nova.notifier.api.notify_decorator)

    name - name of the function
    function - object of the function
    """
    # If FLAGS.monkey_patch is not True, this function do nothing.
    if not FLAGS.monkey_patch:
        return
    # Get list of modules and decorators
    for module_and_decorator in FLAGS.monkey_patch_modules:
        module, decorator_name = module_and_decorator.split(':')
        # import decorator function
        decorator = import_class(decorator_name)
        __import__(module)
        # Retrieve module information using pyclbr; this parses the source
        # rather than importing, so it sees top-level classes/functions.
        module_data = pyclbr.readmodule_ex(module)
        for key in module_data.keys():
            # set the decorator for the class methods
            if isinstance(module_data[key], pyclbr.Class):
                clz = import_class("%s.%s" % (module, key))
                for method, func in inspect.getmembers(clz, inspect.ismethod):
                    setattr(clz, method,
                            decorator("%s.%s.%s" % (module, key, method), func))
            # set the decorator for the function
            if isinstance(module_data[key], pyclbr.Function):
                func = import_class("%s.%s" % (module, key))
                setattr(sys.modules[module], key,
                        decorator("%s.%s" % (module, key), func))
|
2011-08-31 17:06:15 -07:00
|
|
|
|
|
|
|
|
2011-09-02 11:00:33 -07:00
|
|
|
def convert_to_list_dict(lst, label):
    """Wrap a scalar or list of values as a list of {label: value} dicts.

    Falsy input yields None (not an empty list).
    """
    if not lst:
        return None
    values = lst if isinstance(lst, list) else [lst]
    return [{label: item} for item in values]
|
2011-09-14 23:11:03 +00:00
|
|
|
|
|
|
|
|
|
|
|
def timefunc(func):
    """Decorator that logs how long a particular function took to execute"""
    @functools.wraps(func)
    def inner(*args, **kwargs):
        started = time.time()
        try:
            return func(*args, **kwargs)
        finally:
            # Logged even when func raises.
            elapsed = time.time() - started
            LOG.debug(_("timefunc: '%(name)s' took %(total_time).2f secs") %
                      dict(name=func.__name__, total_time=elapsed))
    return inner
|
2011-09-21 15:59:40 -05:00
|
|
|
|
|
|
|
|
2011-09-20 15:21:06 -05:00
|
|
|
def generate_glance_url():
    """Generate the URL to glance."""
    # TODO(jk0): This will eventually need to take SSL into consideration
    # when supported in glance.
    # Host and port come from the service-wide FLAGS configuration.
    return "http://%s:%d" % (FLAGS.glance_host, FLAGS.glance_port)
|
2011-10-10 17:58:56 -05:00
|
|
|
|
|
|
|
|
|
|
|
@contextlib.contextmanager
def save_and_reraise_exception():
    """Save current exception, run some code and then re-raise.

    In some cases the exception context can be cleared, resulting in None
    being attempted to be reraised after an exception handler is run. This
    can happen when eventlet switches greenthreads or when running an
    exception handler, code raises and catches an exception. In both
    cases the exception context will be cleared.

    To work around this, we save the exception state, run handler code, and
    then re-raise the original exception. If another exception occurs, the
    saved exception is logged and the new exception is reraised.
    """
    # Capture the in-flight exception before the handler body runs.
    type_, value, traceback = sys.exc_info()
    try:
        yield
    except Exception:
        # NOTE(jkoelker): Using LOG.error here since it accepts exc_info
        #                 as a kwargs.
        LOG.error(_('Original exception being dropped'),
                  exc_info=(type_, value, traceback))
        raise
    # Python 2 three-argument raise: re-raise the saved exception with
    # its original traceback.
    raise type_, value, traceback
|
2011-10-10 17:58:56 -05:00
|
|
|
|
|
|
|
|
2011-11-08 20:23:14 -06:00
|
|
|
@contextlib.contextmanager
def logging_error(message):
    """Catches exception, write message to the log, re-raise.

    This is a common refinement of save_and_reraise that writes a specific
    message to the log.
    """
    try:
        yield
    except Exception:
        # The exception was previously bound to an unused name; catch
        # without binding.  LOG.exception picks it up from sys.exc_info().
        with save_and_reraise_exception():
            LOG.exception(message)
|
|
|
|
|
|
|
|
|
2011-10-10 17:58:56 -05:00
|
|
|
def make_dev_path(dev, partition=None, base='/dev'):
    """Return a path to a particular device.

    >>> make_dev_path('xvdc')
    /dev/xvdc

    >>> make_dev_path('xvdc', 1)
    /dev/xvdc1
    """
    # A falsy partition (None/0) yields no suffix, matching the original
    # truthiness test.
    suffix = str(partition) if partition else ''
    return os.path.join(base, dev) + suffix
|
2011-08-05 16:35:56 +01:00
|
|
|
|
|
|
|
|
|
|
|
def total_seconds(td):
    """Return td's duration in (fractional) seconds.

    Local total_seconds implementation for compatibility with Python 2.6,
    whose timedelta lacks the method.
    """
    if hasattr(td, 'total_seconds'):
        return td.total_seconds()
    micros = (td.days * 86400 + td.seconds) * 10 ** 6 + td.microseconds
    return micros / 10.0 ** 6
|
2011-11-09 18:16:24 -05:00
|
|
|
|
|
|
|
|
|
|
|
def sanitize_hostname(hostname):
    """Return a hostname which conforms to RFC-952 and RFC-1123 specs."""
    # Python 2: unicode input is narrowed to latin-1, silently dropping
    # anything unencodable.
    if isinstance(hostname, unicode):
        hostname = hostname.encode('latin-1', 'ignore')

    # Spaces/underscores become hyphens; every other non-word character
    # (other than '.' and '-') is stripped outright.
    hostname = re.sub('[ _]', '-', hostname)
    hostname = re.sub('[^\w.-]+', '', hostname)
    hostname = hostname.lower()
    hostname = hostname.strip('.-')

    return hostname
|
2011-12-02 14:18:38 +01:00
|
|
|
|
|
|
|
|
2012-01-04 18:40:46 -08:00
|
|
|
def read_cached_file(filename, cache_info, reload_func=None):
    """Read from a file if it has been modified.

    :param filename: path of the file to read
    :param cache_info: dictionary to hold opaque cache.
    :param reload_func: optional function to be called with data when
                        file is reloaded due to a modification.

    :returns: data from file
    """
    mtime = os.path.getmtime(filename)
    stale = not cache_info or mtime != cache_info.get('mtime')
    if stale:
        with open(filename) as handle:
            cache_info['data'] = handle.read()
        cache_info['mtime'] = mtime
        if reload_func:
            reload_func(cache_info['data'])
    return cache_info['data']
|
2011-12-21 22:40:23 +00:00
|
|
|
|
|
|
|
|
2012-01-09 18:21:57 +11:00
|
|
|
def hash_file(file_like_object):
    """Generate a SHA1 hex digest for the contents of a file.

    :param file_like_object: object supporting read(size); read in 32KiB
                             chunks so arbitrarily large files fit in memory
    :returns: hex digest string
    """
    checksum = hashlib.sha1()
    # Sentinel is b'' so binary files terminate correctly on Python 3 as
    # well (on Python 2, b'' == '' so behavior is unchanged).  The original
    # any(map(...)) construct abused map for side effects; a plain loop is
    # clearer and equivalent.
    for chunk in iter(lambda: file_like_object.read(32768), b''):
        checksum.update(chunk)
    return checksum.hexdigest()
|
|
|
|
|
|
|
|
|
2011-12-21 22:40:23 +00:00
|
|
|
@contextlib.contextmanager
def temporary_mutation(obj, **kwargs):
    """Temporarily set the attr on a particular object to a given value then
    revert when finished.

    One use of this is to temporarily set the read_deleted flag on a context
    object:

        with temporary_mutation(context, read_deleted="yes"):
            do_something_that_needed_deleted_objects()
    """
    # Sentinel distinguishing "attribute did not exist" from "was None".
    NOT_PRESENT = object()

    old_values = {}
    for attr, new_value in kwargs.items():
        old_values[attr] = getattr(obj, attr, NOT_PRESENT)
        setattr(obj, attr, new_value)

    try:
        yield
    finally:
        for attr, old_value in old_values.items():
            if old_value is NOT_PRESENT:
                # BUG FIX: the original used ``del obj[attr]`` (item
                # deletion) which raises TypeError on ordinary objects;
                # attributes set via setattr must be removed via delattr.
                delattr(obj, attr)
            else:
                setattr(obj, attr, old_value)
|
2012-01-10 11:22:33 -06:00
|
|
|
|
|
|
|
|
|
|
|
def warn_deprecated_class(cls, msg):
    """Issue a warning to indicate that the given class is deprecated.

    If a message is given, it is appended to the deprecation warning.
    """
    fullname = '%s.%s' % (cls.__module__, cls.__name__)
    if msg:
        fullmsg = _("Class %(fullname)s is deprecated: %(msg)s")
    else:
        fullmsg = _("Class %(fullname)s is deprecated")

    # Explicit mapping instead of locals(): the template only ever
    # interpolates these two names.
    warnings.warn(fullmsg % {'fullname': fullname, 'msg': msg},
                  DeprecationWarning, stacklevel=3)
|
|
|
|
|
|
|
|
|
|
|
|
def warn_deprecated_function(func, msg):
    """Issue a warning to indicate that the given function is
    deprecated.  If a message is given, it is appended to the
    deprecation warning.
    """
    name = func.__name__

    # Find the function's definition
    sourcefile = inspect.getsourcefile(func)

    # Find the line number, if possible.  Use the __func__/__code__
    # aliases (present since Python 2.6) instead of the legacy
    # im_func/func_code attributes, which were removed in Python 3.
    if inspect.ismethod(func):
        code = func.__func__.__code__
    else:
        code = func.__code__
    lineno = getattr(code, 'co_firstlineno', None)

    if lineno is None:
        location = sourcefile
    else:
        location = "%s:%d" % (sourcefile, lineno)

    # Build up the message; the templates interpolate name/location/msg
    # from locals(), so those variable names must not change.
    if msg:
        fullmsg = _("Function %(name)s in %(location)s is deprecated: %(msg)s")
    else:
        fullmsg = _("Function %(name)s in %(location)s is deprecated")

    # Issue the warning
    warnings.warn(fullmsg % locals(), DeprecationWarning, stacklevel=3)
|
|
|
|
|
|
|
|
|
|
|
|
def _stubout(klass, message):
    """
    Scans a class and generates wrapping stubs for __new__() and every
    class and static method, then installs the stubs on the class in
    place so each call emits a deprecation warning.

    NOTE(review): an earlier docstring said a dictionary is returned for
    use with type(); the code below actually applies the overrides with
    setattr() and returns None.
    """

    # name -> replacement stub, applied to klass at the end.
    overrides = {}

    def makestub_class(name, func):
        """
        Create a stub for wrapping class methods.
        """

        def stub(cls, *args, **kwargs):
            # Warn on every invocation, then delegate.  ``func`` was
            # fetched via getattr(klass, name), so it is already bound
            # to the class and ``cls`` is deliberately not forwarded.
            warn_deprecated_class(klass, message)
            return func(*args, **kwargs)

        # Overwrite the stub's name so warnings/tracebacks show the
        # wrapped method's name rather than 'stub'.
        stub.__name__ = name
        stub.func_name = name  # Python 2 alias for __name__

        return classmethod(stub)

    def makestub_static(name, func):
        """
        Create a stub for wrapping static methods.
        """

        def stub(*args, **kwargs):
            warn_deprecated_class(klass, message)
            return func(*args, **kwargs)

        # Overwrite the stub's name
        stub.__name__ = name
        stub.func_name = name  # Python 2 alias for __name__

        return staticmethod(stub)

    for name, kind, _klass, _obj in inspect.classify_class_attrs(klass):
        # We're only interested in __new__(), class methods, and
        # static methods...
        if (name != '__new__' and
            kind not in ('class method', 'static method')):
            continue

        # Get the function...
        func = getattr(klass, name)

        # Override it in the class
        if kind == 'class method':
            stub = makestub_class(name, func)
        elif kind == 'static method' or name == '__new__':
            # __new__ is an implicit static method, so it takes the
            # static wrapper even though classify reports it otherwise.
            stub = makestub_static(name, func)

        # Save it in the overrides dictionary...
        overrides[name] = stub

    # Apply the overrides
    for name, stub in overrides.items():
        setattr(klass, name, stub)
|
|
|
|
|
|
|
|
|
|
|
|
def deprecated(message=''):
    """
    Marks a function, class, or method as being deprecated.  For
    functions and methods, emits a warning each time the function or
    method is called.  For classes, stubs the class in place so that
    instantiation and every class or static method call emits a warning.

    If a message is passed to the decorator, that message is appended
    to the emitted warning -- e.g. to suggest an alternative, or to
    explain why the object is deprecated.
    """

    def decorator(f_or_c):
        # Guard clause: refuse non-callables and old-style classes,
        # which cannot be wrapped.
        if not callable(f_or_c) or isinstance(f_or_c, types.ClassType):
            warnings.warn("Cannot mark object %r as deprecated" % f_or_c,
                          DeprecationWarning, stacklevel=2)
            return f_or_c

        # New-style class: stub out __new__ plus class/static methods
        # in place and hand the same class back.
        if inspect.isclass(f_or_c):
            _stubout(f_or_c, message)
            return f_or_c

        # Plain function or method: traditional wrapper that warns on
        # each call before delegating.
        @functools.wraps(f_or_c)
        def wrapper(*args, **kwargs):
            warn_deprecated_function(f_or_c, message)

            return f_or_c(*args, **kwargs)

        return wrapper
    return decorator
|
|
|
|
|
|
|
|
|
|
|
|
def _showwarning(message, category, filename, lineno, file=None, line=None):
    """Redirect warnings into logging.

    Drop-in replacement for warnings.showwarning: formats the warning
    exactly as the warnings module would, then sends it to the logger
    instead of stderr.
    """
    formatted = warnings.formatwarning(message, category, filename, lineno,
                                       line)
    LOG.warning(formatted)
|
|
|
|
|
|
|
|
|
|
|
|
# Install our warnings handler at import time so every warning emitted
# after this module loads (including the deprecation warnings above) is
# routed into the log instead of stderr.
warnings.showwarning = _showwarning
|
2012-01-19 21:36:42 -08:00
|
|
|
|
|
|
|
|
|
|
|
def service_is_up(service):
    """Check whether a service is up based on last heartbeat."""
    heartbeat = service['updated_at'] or service['created_at']
    # Timestamps in DB are UTC, so compare against utcnow().
    age = utcnow() - heartbeat
    return abs(total_seconds(age)) <= FLAGS.service_down_time
|
2012-01-20 17:25:08 -08:00
|
|
|
|
|
|
|
|
|
|
|
def generate_mac_address():
    """Generate an Ethernet MAC address."""
    # NOTE(vish): We would prefer to use 0xfe here to ensure that linux
    #             bridge mac addresses don't change, but it appears to
    #             conflict with libvirt, so we use the next highest octet
    #             that has the unicast and locally administered bits set
    #             properly: 0xfa.
    #             Discussion: https://bugs.launchpad.net/nova/+bug/921838
    octets = [0xfa, 0x16, 0x3e,
              random.randint(0x00, 0x7f),
              random.randint(0x00, 0xff),
              random.randint(0x00, 0xff)]
    return ':'.join('%02x' % octet for octet in octets)
|
2012-02-03 15:29:00 -08:00
|
|
|
|
|
|
|
|
|
|
|
def read_file_as_root(file_path):
    """Secure helper to read file as root.

    :param file_path: path of the file to read
    :returns: file contents as a string
    :raises exception.FileNotFound: if 'cat' fails (file missing/unreadable)
    """
    try:
        contents, _err = execute('cat', file_path, run_as_root=True)
    except exception.ProcessExecutionError:
        raise exception.FileNotFound(file_path=file_path)
    return contents
|
2012-02-27 21:01:49 +00:00
|
|
|
|
|
|
|
|
|
|
|
@contextlib.contextmanager
def temporary_chown(path, owner_uid=None):
    """Temporarily chown a path.

    :params owner_uid: UID of temporary owner (defaults to current user)
    """
    if owner_uid is None:
        owner_uid = os.getuid()

    original_uid = os.stat(path).st_uid
    # Only chown (and chown back) when ownership actually differs.
    needs_chown = original_uid != owner_uid

    if needs_chown:
        execute('chown', owner_uid, path, run_as_root=True)
    try:
        yield
    finally:
        if needs_chown:
            execute('chown', original_uid, path, run_as_root=True)
|
2012-02-28 05:54:48 +00:00
|
|
|
|
|
|
|
|
|
|
|
@contextlib.contextmanager
def tempdir(**kwargs):
    """Create a temporary directory, yield it, and clean it up afterwards.

    :param kwargs: passed straight through to tempfile.mkdtemp()
    """
    tmpdir = tempfile.mkdtemp(**kwargs)
    try:
        yield tmpdir
    finally:
        try:
            shutil.rmtree(tmpdir)
        except OSError as e:
            # 'as' form (valid since Python 2.6) replaces the py2-only
            # 'except OSError, e' and matches the file's other handlers.
            # Cleanup is best-effort: the dir may already be gone.
            LOG.debug(_('Could not remove tmpdir: %s'), str(e))
|
2012-02-28 10:55:38 -05:00
|
|
|
|
|
|
|
|
|
|
|
def strcmp_const_time(s1, s2):
    """Constant-time string comparison.

    :params s1: the first string
    :params s2: the second string

    :return: True if the strings are equal.

    This function takes two strings and compares them.  It is intended to be
    used when doing a comparison for authentication purposes to help guard
    against timing attacks.
    """
    if len(s1) != len(s2):
        return False
    # Accumulate XOR differences over every position so the running time
    # does not depend on where the first mismatch occurs.
    mismatch = 0
    for ch1, ch2 in zip(s1, s2):
        mismatch |= ord(ch1) ^ ord(ch2)
    return mismatch == 0
|
2012-02-29 23:38:56 +00:00
|
|
|
|
|
|
|
|
|
|
|
class UndoManager(object):
|
|
|
|
"""Provides a mechanism to facilitate rolling back a series of actions
|
|
|
|
when an exception is raised.
|
|
|
|
"""
|
|
|
|
    def __init__(self):
        # LIFO stack of zero-argument callables executed on rollback.
        self.undo_stack = []
|
|
|
|
|
|
|
|
    def undo_with(self, undo_func):
        """Register a zero-argument callable to run if we roll back."""
        self.undo_stack.append(undo_func)
|
|
|
|
|
|
|
|
def _rollback(self):
|
|
|
|
for undo_func in reversed(self.undo_stack):
|
|
|
|
undo_func()
|
|
|
|
|
|
|
|
    def rollback_and_reraise(self, msg=None):
        """Rollback a series of actions then re-raise the exception.

        .. note:: (sirp) This should only be called within an
                  exception handler.
        """
        # save_and_reraise_exception() re-raises the active exception on
        # exit; logging and rollback happen inside its scope so the
        # original traceback is preserved even if they raise.
        with save_and_reraise_exception():
            if msg:
                LOG.exception(msg)

            self._rollback()
|