Switch from FLAGS to CONF in misc modules
Use the global CONF variable instead of FLAGS. This is purely a cleanup
since FLAGS is already just another reference to CONF. We leave the
nova.flags imports until a later cleanup commit since removing them may
cause unpredictable problems due to config options not being registered.

Change-Id: Ib110ba8d1837780e90b0d3fe13f8e6b68ed15f65
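The pattern applied across these modules is sketched below. The sketch is illustrative only: example_opts, example_backend and lookup() are hypothetical stand-ins and not part of this change; each module applies the same shape to its own option list.

    from nova import config
    from nova import flags    # import kept for now, per the note above
    from nova.openstack.common import cfg

    # Hypothetical stand-in for a module's real option list.
    example_opts = [
        cfg.StrOpt('example_backend',
                   default='noop',
                   help='illustrative option, not defined by this commit'),
    ]

    # Before: FLAGS = flags.FLAGS; FLAGS.register_opts(example_opts)
    # After:
    CONF = config.CONF
    CONF.register_opts(example_opts)


    def lookup():
        # FLAGS is just another reference to CONF, so both return 'noop'.
        return CONF.example_backend, flags.FLAGS.example_backend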
parent 8ce58defbe
commit 637e805634
Changed files:
nova/
    block_device.py
    cert/
    cloudpipe/
    common/
    consoleauth/
    crypto.py
    flags.py
    image/
    ipv6/
    manager.py
    notifications.py
    objectstore/
    policy.py
    quota.py
    service.py
    utils.py
    vnc/
    volume/
    wsgi.py
tools/xenserver/
@@ -17,9 +17,10 @@
 
 import re
 
+from nova import config
 from nova import flags
 
-FLAGS = flags.FLAGS
+CONF = config.CONF
 
 DEFAULT_ROOT_DEV_NAME = '/dev/sda1'
 _DEFAULT_MAPPINGS = {'ami': 'sda1',
@@ -94,7 +95,7 @@ def instance_block_mapping(instance, bdms):
 root_device_name = instance['root_device_name']
 # NOTE(clayg): remove this when xenapi is setting default_root_device
 if root_device_name is None:
-if FLAGS.compute_driver.endswith('xenapi.XenAPIDriver'):
+if CONF.compute_driver.endswith('xenapi.XenAPIDriver'):
 root_device_name = '/dev/xvda'
 else:
 return _DEFAULT_MAPPINGS
@@ -32,7 +32,6 @@ from nova import manager
 from nova.openstack.common import log as logging
 
 LOG = logging.getLogger(__name__)
-FLAGS = flags.FLAGS
 
 
 class CertManager(manager.Manager):
@@ -18,11 +18,11 @@
 Client side of the cert manager RPC API.
 """
 
+from nova import config
 from nova import flags
 import nova.openstack.common.rpc.proxy
 
-
-FLAGS = flags.FLAGS
+CONF = config.CONF
 
 
 class CertAPI(nova.openstack.common.rpc.proxy.RpcProxy):
@@ -45,7 +45,7 @@ class CertAPI(nova.openstack.common.rpc.proxy.RpcProxy):
 
 def __init__(self):
 super(CertAPI, self).__init__(
-topic=FLAGS.cert_topic,
+topic=CONF.cert_topic,
 default_version=self.BASE_RPC_API_VERSION)
 
 def revoke_certs_by_user(self, ctxt, user_id):
@@ -55,12 +55,9 @@ cloudpipe_opts = [
 ]
 
 CONF = config.CONF
+CONF.register_opts(cloudpipe_opts)
 CONF.import_opt('cnt_vpn_clients', 'nova.network.manager')
 
-FLAGS = flags.FLAGS
-FLAGS.register_opts(cloudpipe_opts)
-
-
 LOG = logging.getLogger(__name__)
 
 
@@ -74,14 +71,14 @@ class CloudPipe(object):
 filename = "payload.zip"
 zippath = os.path.join(tmpdir, filename)
 z = zipfile.ZipFile(zippath, "w", zipfile.ZIP_DEFLATED)
-shellfile = open(FLAGS.boot_script_template, "r")
+shellfile = open(CONF.boot_script_template, "r")
 s = string.Template(shellfile.read())
 shellfile.close()
-boot_script = s.substitute(cc_dmz=FLAGS.ec2_dmz_host,
-cc_port=FLAGS.ec2_port,
-dmz_net=FLAGS.dmz_net,
-dmz_mask=FLAGS.dmz_mask,
-num_vpn=FLAGS.cnt_vpn_clients)
+boot_script = s.substitute(cc_dmz=CONF.ec2_dmz_host,
+cc_port=CONF.ec2_port,
+dmz_net=CONF.dmz_net,
+dmz_mask=CONF.dmz_mask,
+num_vpn=CONF.cnt_vpn_clients)
 # genvpn, sign csr
 crypto.generate_vpn_files(project_id)
 z.writestr('autorun.sh', boot_script)
@@ -110,19 +107,19 @@ class CloudPipe(object):
 key_name = self.setup_key_pair(context)
 group_name = self.setup_security_group(context)
 instance_type = instance_types.get_instance_type_by_name(
-FLAGS.vpn_instance_type)
-instance_name = '%s%s' % (context.project_id, FLAGS.vpn_key_suffix)
+CONF.vpn_instance_type)
+instance_name = '%s%s' % (context.project_id, CONF.vpn_key_suffix)
 user_data = self.get_encoded_zip(context.project_id)
 return self.compute_api.create(context,
 instance_type,
-FLAGS.vpn_image_id,
+CONF.vpn_image_id,
 display_name=instance_name,
 user_data=user_data,
 key_name=key_name,
 security_group=[group_name])
 
 def setup_security_group(self, context):
-group_name = '%s%s' % (context.project_id, FLAGS.vpn_key_suffix)
+group_name = '%s%s' % (context.project_id, CONF.vpn_key_suffix)
 if db.security_group_exists(context, context.project_id, group_name):
 return group_name
 group = {'user_id': context.user_id,
@@ -147,14 +144,14 @@ class CloudPipe(object):
 return group_name
 
 def setup_key_pair(self, context):
-key_name = '%s%s' % (context.project_id, FLAGS.vpn_key_suffix)
+key_name = '%s%s' % (context.project_id, CONF.vpn_key_suffix)
 try:
 keypair_api = compute.api.KeypairAPI()
 result = keypair_api.create_key_pair(context,
 context.user_id,
 key_name)
 private_key = result['private_key']
-key_dir = os.path.join(FLAGS.keys_path, context.user_id)
+key_dir = os.path.join(CONF.keys_path, context.user_id)
 fileutils.ensure_tree(key_dir)
 key_path = os.path.join(key_dir, '%s.pem' % key_name)
 with open(key_path, 'w') as f:
@@ -25,6 +25,7 @@ import eventlet
 import eventlet.backdoor
 import greenlet
 
+from nova import config
 from nova import flags
 from nova.openstack.common import cfg
 
@@ -34,8 +35,8 @@ eventlet_backdoor_opts = [
 help='port for eventlet backdoor to listen')
 ]
 
-FLAGS = flags.FLAGS
-FLAGS.register_opts(eventlet_backdoor_opts)
+CONF = config.CONF
+CONF.register_opts(eventlet_backdoor_opts)
 
 
 def dont_use_this():
@@ -62,7 +63,7 @@ backdoor_locals = {
 
 
 def initialize_if_enabled():
-if FLAGS.backdoor_port is None:
+if CONF.backdoor_port is None:
 return
 
 # NOTE(johannes): The standard sys.displayhook will print the value of
@@ -76,5 +77,5 @@ def initialize_if_enabled():
 sys.displayhook = displayhook
 
 eventlet.spawn(eventlet.backdoor.backdoor_server,
-eventlet.listen(('localhost', FLAGS.backdoor_port)),
+eventlet.listen(('localhost', CONF.backdoor_port)),
 locals=backdoor_locals)
@@ -18,6 +18,7 @@
 
 """Module to authenticate Consoles."""
 
+from nova import config
 from nova import flags
 from nova.openstack.common import cfg
 
@@ -26,5 +27,5 @@ consoleauth_topic_opt = cfg.StrOpt('consoleauth_topic',
 default='consoleauth',
 help='the topic console auth proxy nodes listen on')
 
-FLAGS = flags.FLAGS
-FLAGS.register_opt(consoleauth_topic_opt)
+CONF = config.CONF
+CONF.register_opt(consoleauth_topic_opt)
@@ -20,6 +20,7 @@
 
 import time
 
+from nova import config
 from nova import flags
 from nova import manager
 from nova.openstack.common import cfg
@@ -38,8 +39,8 @@ consoleauth_opts = [
 help='Manager for console auth'),
 ]
 
-FLAGS = flags.FLAGS
-FLAGS.register_opts(consoleauth_opts)
+CONF = config.CONF
+CONF.register_opts(consoleauth_opts)
 
 
 class ConsoleAuthManager(manager.Manager):
@@ -50,11 +51,11 @@ class ConsoleAuthManager(manager.Manager):
 def __init__(self, scheduler_driver=None, *args, **kwargs):
 super(ConsoleAuthManager, self).__init__(*args, **kwargs)
 
-if FLAGS.memcached_servers:
+if CONF.memcached_servers:
 import memcache
 else:
 from nova.common import memorycache as memcache
-self.mc = memcache.Client(FLAGS.memcached_servers,
+self.mc = memcache.Client(CONF.memcached_servers,
 debug=0)
 
 def authorize_console(self, context, token, console_type, host, port,
@@ -66,7 +67,7 @@ class ConsoleAuthManager(manager.Manager):
 'internal_access_path': internal_access_path,
 'last_activity_at': time.time()}
 data = jsonutils.dumps(token_dict)
-self.mc.set(token, data, FLAGS.console_token_ttl)
+self.mc.set(token, data, CONF.console_token_ttl)
 LOG.audit(_("Received Token: %(token)s, %(token_dict)s)"), locals())
 
 def check_token(self, context, token):
@@ -18,11 +18,11 @@
 Client side of the consoleauth RPC API.
 """
 
+from nova import config
 from nova import flags
 import nova.openstack.common.rpc.proxy
 
-
-FLAGS = flags.FLAGS
+CONF = config.CONF
 
 
 class ConsoleAuthAPI(nova.openstack.common.rpc.proxy.RpcProxy):
@@ -45,7 +45,7 @@ class ConsoleAuthAPI(nova.openstack.common.rpc.proxy.RpcProxy):
 
 def __init__(self):
 super(ConsoleAuthAPI, self).__init__(
-topic=FLAGS.consoleauth_topic,
+topic=CONF.consoleauth_topic,
 default_version=self.BASE_RPC_API_VERSION)
 
 def authorize_console(self, ctxt, token, console_type, host, port,
@@ -28,6 +28,7 @@ import hashlib
 import os
 import string
 
+from nova import config
 from nova import context
 from nova import db
 from nova import exception
@@ -72,30 +73,30 @@ crypto_opts = [
 'project, timestamp')),
 ]
 
-FLAGS = flags.FLAGS
-FLAGS.register_opts(crypto_opts)
+CONF = config.CONF
+CONF.register_opts(crypto_opts)
 
 
 def ca_folder(project_id=None):
-if FLAGS.use_project_ca and project_id:
-return os.path.join(FLAGS.ca_path, 'projects', project_id)
-return FLAGS.ca_path
+if CONF.use_project_ca and project_id:
+return os.path.join(CONF.ca_path, 'projects', project_id)
+return CONF.ca_path
 
 
 def ca_path(project_id=None):
-return os.path.join(ca_folder(project_id), FLAGS.ca_file)
+return os.path.join(ca_folder(project_id), CONF.ca_file)
 
 
 def key_path(project_id=None):
-return os.path.join(ca_folder(project_id), FLAGS.key_file)
+return os.path.join(ca_folder(project_id), CONF.key_file)
 
 
 def crl_path(project_id=None):
-return os.path.join(ca_folder(project_id), FLAGS.crl_file)
+return os.path.join(ca_folder(project_id), CONF.crl_file)
 
 
 def fetch_ca(project_id=None):
-if not FLAGS.use_project_ca:
+if not CONF.use_project_ca:
 project_id = None
 ca_file_path = ca_path(project_id)
 if not os.path.exists(ca_file_path):
@@ -157,7 +158,7 @@ def generate_key_pair(bits=1024):
 
 def fetch_crl(project_id):
 """Get crl file for project."""
-if not FLAGS.use_project_ca:
+if not CONF.use_project_ca:
 project_id = None
 crl_file_path = crl_path(project_id)
 if not os.path.exists(crl_file_path):
@@ -189,7 +190,7 @@ def revoke_cert(project_id, file_name):
 utils.execute('openssl', 'ca', '-config', './openssl.cnf', '-revoke',
 file_name)
 utils.execute('openssl', 'ca', '-gencrl', '-config', './openssl.cnf',
-'-out', FLAGS.crl_file)
+'-out', CONF.crl_file)
 os.chdir(start)
 
 
@@ -219,12 +220,12 @@ def revoke_certs_by_user_and_project(user_id, project_id):
 
 def _project_cert_subject(project_id):
 """Helper to generate user cert subject."""
-return FLAGS.project_cert_subject % (project_id, timeutils.isotime())
+return CONF.project_cert_subject % (project_id, timeutils.isotime())
 
 
 def _user_cert_subject(user_id, project_id):
 """Helper to generate user cert subject."""
-return FLAGS.user_cert_subject % (project_id, user_id, timeutils.isotime())
+return CONF.user_cert_subject % (project_id, user_id, timeutils.isotime())
 
 
 def generate_x509_cert(user_id, project_id, bits=1024):
@@ -281,7 +282,7 @@ def generate_vpn_files(project_id):
 
 
 def sign_csr(csr_text, project_id=None):
-if not FLAGS.use_project_ca:
+if not CONF.use_project_ca:
 project_id = None
 if not project_id:
 return _sign_csr(csr_text, ca_folder())
@@ -30,10 +30,11 @@ import os
 import socket
 import sys
 
+from nova import config
 from nova.openstack.common import cfg
 
-FLAGS = cfg.CONF
+CONF = config.CONF
+FLAGS = CONF
 
 
 def _get_my_ip():
@@ -88,8 +89,8 @@ debug_opts = [
 help='Add python stack traces to SQL as comment strings'),
 ]
 
-FLAGS.register_cli_opts(core_opts)
-FLAGS.register_cli_opts(debug_opts)
+CONF.register_cli_opts(core_opts)
+CONF.register_cli_opts(debug_opts)
 
 global_opts = [
 cfg.StrOpt('my_ip',
@@ -376,4 +377,4 @@ global_opts = [
 'vmwareapi.VMWareESXDriver'),
 ]
 
-FLAGS.register_opts(global_opts)
+CONF.register_opts(global_opts)
@@ -29,6 +29,7 @@ import urlparse
 import glanceclient
 import glanceclient.exc
 
+from nova import config
 from nova import exception
 from nova import flags
 from nova.openstack.common import jsonutils
@@ -37,7 +38,7 @@ from nova.openstack.common import timeutils
 
 
 LOG = logging.getLogger(__name__)
-FLAGS = flags.FLAGS
+CONF = config.CONF
 
 
 def _parse_image_ref(image_href):
@@ -63,8 +64,8 @@ def _create_glance_client(context, host, port, use_ssl, version=1):
 else:
 scheme = 'http'
 params = {}
-params['insecure'] = FLAGS.glance_api_insecure
-if FLAGS.auth_strategy == 'keystone':
+params['insecure'] = CONF.glance_api_insecure
+if CONF.auth_strategy == 'keystone':
 params['token'] = context.auth_token
 endpoint = '%s://%s:%s' % (scheme, host, port)
 return glanceclient.Client(str(version), endpoint, **params)
@@ -72,12 +73,12 @@ def _create_glance_client(context, host, port, use_ssl, version=1):
 
 def get_api_servers():
 """
-Shuffle a list of FLAGS.glance_api_servers and return an iterator
+Shuffle a list of CONF.glance_api_servers and return an iterator
 that will cycle through the list, looping around to the beginning
 if necessary.
 """
 api_servers = []
-for api_server in FLAGS.glance_api_servers:
+for api_server in CONF.glance_api_servers:
 if '//' not in api_server:
 api_server = 'http://' + api_server
 o = urlparse.urlparse(api_server)
@@ -124,12 +125,12 @@ class GlanceClientWrapper(object):
 def call(self, context, version, method, *args, **kwargs):
 """
 Call a glance client method. If we get a connection error,
-retry the request according to FLAGS.glance_num_retries.
+retry the request according to CONF.glance_num_retries.
 """
 retry_excs = (glanceclient.exc.ServiceUnavailable,
 glanceclient.exc.InvalidEndpoint,
 glanceclient.exc.CommunicationError)
-num_attempts = 1 + FLAGS.glance_num_retries
+num_attempts = 1 + CONF.glance_num_retries
 
 for attempt in xrange(1, num_attempts + 1):
 client = self.client or self._create_onetime_client(context,
@@ -31,6 +31,7 @@ from lxml import etree
 
 from nova.api.ec2 import ec2utils
 import nova.cert.rpcapi
+from nova import config
 from nova import exception
 from nova import flags
 from nova.image import glance
@@ -60,8 +61,8 @@ s3_opts = [
 'when downloading from s3'),
 ]
 
-FLAGS = flags.FLAGS
-FLAGS.register_opts(s3_opts)
+CONF = config.CONF
+CONF.register_opts(s3_opts)
 
 
 class S3ImageService(object):
@@ -152,17 +153,17 @@ class S3ImageService(object):
 def _conn(context):
 # NOTE(vish): access and secret keys for s3 server are not
 # checked in nova-objectstore
-access = FLAGS.s3_access_key
-if FLAGS.s3_affix_tenant:
+access = CONF.s3_access_key
+if CONF.s3_affix_tenant:
 access = '%s:%s' % (access, context.project_id)
-secret = FLAGS.s3_secret_key
+secret = CONF.s3_secret_key
 calling = boto.s3.connection.OrdinaryCallingFormat()
 return boto.s3.connection.S3Connection(aws_access_key_id=access,
 aws_secret_access_key=secret,
-is_secure=FLAGS.s3_use_ssl,
+is_secure=CONF.s3_use_ssl,
 calling_format=calling,
-port=FLAGS.s3_port,
-host=FLAGS.s3_host)
+port=CONF.s3_port,
+host=CONF.s3_host)
 
 @staticmethod
 def _download_file(bucket, filename, local_dir):
@@ -260,7 +261,7 @@ class S3ImageService(object):
 def _s3_create(self, context, metadata):
 """Gets a manifest from s3 and makes an image."""
 
-image_path = tempfile.mkdtemp(dir=FLAGS.image_decryption_dir)
+image_path = tempfile.mkdtemp(dir=CONF.image_decryption_dir)
 
 image_location = metadata['properties']['image_location']
 bucket_name = image_location.split('/')[0]
@@ -14,6 +14,7 @@
 # License for the specific language governing permissions and limitations
 # under the License.
 
+from nova import config
 from nova import flags
 from nova.openstack.common import cfg
 from nova import utils
@@ -23,8 +24,8 @@ ipv6_backend_opt = cfg.StrOpt('ipv6_backend',
 default='rfc2462',
 help='Backend to use for IPv6 generation')
 
-FLAGS = flags.FLAGS
-FLAGS.register_opt(ipv6_backend_opt)
+CONF = config.CONF
+CONF.register_opt(ipv6_backend_opt)
 IMPL = None
 
 
@@ -55,6 +55,7 @@ This module provides Manager, a base class for managers.
 
 import eventlet
 
+from nova import config
 from nova.db import base
 from nova import flags
 from nova.openstack.common import log as logging
@@ -63,10 +64,7 @@ from nova.openstack.common.rpc import dispatcher as rpc_dispatcher
 from nova.scheduler import rpcapi as scheduler_rpcapi
 from nova import version
 
-
-FLAGS = flags.FLAGS
-
-
+CONF = config.CONF
 
 LOG = logging.getLogger(__name__)
 
@@ -139,7 +137,7 @@ class Manager(base.Base):
 
 def __init__(self, host=None, db_driver=None):
 if not host:
-host = FLAGS.host
+host = CONF.host
 self.host = host
 self.load_plugins()
 super(Manager, self).__init__(db_driver)
@@ -215,8 +213,8 @@ class Manager(base.Base):
 
 def service_config(self, context):
 config = {}
-for key in FLAGS:
-config[key] = FLAGS.get(key, None)
+for key in CONF:
+config[key] = CONF.get(key, None)
 return config
 
 
@@ -19,6 +19,7 @@
 the system.
 """
 
+from nova import config
 import nova.context
 from nova import db
 from nova import exception
@@ -50,16 +51,16 @@ notify_api_faults = cfg.BoolOpt('notify_api_faults', default=False,
 'in the API service.')
 
 
-FLAGS = flags.FLAGS
-FLAGS.register_opt(notify_state_opt)
-FLAGS.register_opt(notify_any_opt)
-FLAGS.register_opt(notify_api_faults)
+CONF = config.CONF
+CONF.register_opt(notify_state_opt)
+CONF.register_opt(notify_any_opt)
+CONF.register_opt(notify_api_faults)
 
 
 def send_api_fault(url, status, exception):
 """Send an api.fault notification."""
 
-if not FLAGS.notify_api_faults:
+if not CONF.notify_api_faults:
 return
 
 payload = {'url': url, 'exception': str(exception), 'status': status}
@@ -75,7 +76,7 @@ def send_update(context, old_instance, new_instance, service=None, host=None):
 in that instance
 """
 
-if not FLAGS.notify_on_any_change and not FLAGS.notify_on_state_change:
+if not CONF.notify_on_any_change and not CONF.notify_on_state_change:
 # skip all this if updates are disabled
 return
 
@@ -91,8 +92,8 @@ def send_update(context, old_instance, new_instance, service=None, host=None):
 if old_vm_state != new_vm_state:
 # yes, the vm state is changing:
 update_with_state_change = True
-elif FLAGS.notify_on_state_change:
-if (FLAGS.notify_on_state_change.lower() == "vm_and_task_state" and
+elif CONF.notify_on_state_change:
+if (CONF.notify_on_state_change.lower() == "vm_and_task_state" and
 old_task_state != new_task_state):
 # yes, the task state is changing:
 update_with_state_change = True
@@ -120,7 +121,7 @@ def send_update_with_states(context, instance, old_vm_state, new_vm_state,
 are any, in the instance
 """
 
-if not FLAGS.notify_on_state_change:
+if not CONF.notify_on_state_change:
 # skip all this if updates are disabled
 return
 
@@ -135,8 +136,8 @@ def send_update_with_states(context, instance, old_vm_state, new_vm_state,
 if old_vm_state != new_vm_state:
 # yes, the vm state is changing:
 fire_update = True
-elif FLAGS.notify_on_state_change:
-if (FLAGS.notify_on_state_change.lower() == "vm_and_task_state" and
+elif CONF.notify_on_state_change:
+if (CONF.notify_on_state_change.lower() == "vm_and_task_state" and
 old_task_state != new_task_state):
 # yes, the task state is changing:
 fire_update = True
@@ -44,6 +44,7 @@ import urllib
 import routes
 import webob
 
+from nova import config
 from nova import flags
 from nova.openstack.common import cfg
 from nova.openstack.common import fileutils
@@ -63,15 +64,15 @@ s3_opts = [
 help='port for s3 api to listen'),
 ]
 
-FLAGS = flags.FLAGS
-FLAGS.register_opts(s3_opts)
+CONF = config.CONF
+CONF.register_opts(s3_opts)
 
 
 def get_wsgi_server():
 return wsgi.Server("S3 Objectstore",
-S3Application(FLAGS.buckets_path),
-port=FLAGS.s3_listen_port,
-host=FLAGS.s3_listen)
+S3Application(CONF.buckets_path),
+port=CONF.s3_listen_port,
+host=CONF.s3_listen)
 
 
 class S3Application(wsgi.Router):
@@ -19,6 +19,7 @@
 
 import os.path
 
+from nova import config
 from nova import exception
 from nova import flags
 from nova.openstack.common import cfg
@@ -35,8 +36,8 @@ policy_opts = [
 help=_('Rule checked when requested rule is not found')),
 ]
 
-FLAGS = flags.FLAGS
-FLAGS.register_opts(policy_opts)
+CONF = config.CONF
+CONF.register_opts(policy_opts)
 
 _POLICY_PATH = None
 _POLICY_CACHE = {}
@@ -54,17 +55,17 @@ def init():
 global _POLICY_PATH
 global _POLICY_CACHE
 if not _POLICY_PATH:
-_POLICY_PATH = FLAGS.policy_file
+_POLICY_PATH = CONF.policy_file
 if not os.path.exists(_POLICY_PATH):
-_POLICY_PATH = FLAGS.find_file(_POLICY_PATH)
+_POLICY_PATH = CONF.find_file(_POLICY_PATH)
 if not _POLICY_PATH:
-raise exception.ConfigNotFound(path=FLAGS.policy_file)
+raise exception.ConfigNotFound(path=CONF.policy_file)
 utils.read_cached_file(_POLICY_PATH, _POLICY_CACHE,
 reload_func=_set_rules)
 
 
 def _set_rules(data):
-default_rule = FLAGS.policy_default_rule
+default_rule = CONF.policy_default_rule
 policy.set_rules(policy.Rules.load_json(data, default_rule))
 
 
@@ -20,6 +20,7 @@
 
 import datetime
 
+from nova import config
 from nova import db
 from nova import exception
 from nova import flags
@@ -85,8 +86,8 @@ quota_opts = [
 help='default driver to use for quota checks'),
 ]
 
-FLAGS = flags.FLAGS
-FLAGS.register_opts(quota_opts)
+CONF = config.CONF
+CONF.register_opts(quota_opts)
 
 
 class DbQuotaDriver(object):
@@ -314,7 +315,7 @@ class DbQuotaDriver(object):
 
 # Set up the reservation expiration
 if expire is None:
-expire = FLAGS.reservation_expire
+expire = CONF.reservation_expire
 if isinstance(expire, (int, long)):
 expire = datetime.timedelta(seconds=expire)
 if isinstance(expire, datetime.timedelta):
@@ -335,7 +336,7 @@ class DbQuotaDriver(object):
 # session isn't available outside the DBAPI, we
 # have to do the work there.
 return db.quota_reserve(context, resources, quotas, deltas, expire,
-FLAGS.until_refresh, FLAGS.max_age)
+CONF.until_refresh, CONF.max_age)
 
 def commit(self, context, reservations):
 """Commit reservations.
@@ -476,7 +477,7 @@ class BaseResource(object):
 def default(self):
 """Return the default value of the quota."""
 
-return FLAGS[self.flag] if self.flag else -1
+return CONF[self.flag] if self.flag else -1
 
 
 class ReservableResource(BaseResource):
@@ -568,7 +569,7 @@ class QuotaEngine(object):
 """Initialize a Quota object."""
 
 if not quota_driver_class:
-quota_driver_class = FLAGS.quota_driver
+quota_driver_class = CONF.quota_driver
 
 if isinstance(quota_driver_class, basestring):
 quota_driver_class = importutils.import_object(quota_driver_class)
@@ -31,6 +31,7 @@ import eventlet
 import greenlet
 
 from nova.common import eventlet_backdoor
+from nova import config
 from nova import context
 from nova import db
 from nova import exception
@@ -90,8 +91,8 @@ service_opts = [
 help='Number of workers for metadata service'),
 ]
 
-FLAGS = flags.FLAGS
-FLAGS.register_opts(service_opts)
+CONF = config.CONF
+CONF.register_opts(service_opts)
 
 
 class SignalExit(SystemExit):
@@ -167,9 +168,9 @@ class ServiceLauncher(Launcher):
 signal.signal(signal.SIGTERM, self._handle_signal)
 signal.signal(signal.SIGINT, self._handle_signal)
 
-LOG.debug(_('Full set of FLAGS:'))
-for flag in FLAGS:
-flag_get = FLAGS.get(flag, None)
+LOG.debug(_('Full set of CONF:'))
+for flag in CONF:
+flag_get = CONF.get(flag, None)
 # hide flag contents from log if contains a password
 # should use secret flag when switch over to openstack-common
 if ("_password" in flag or "_key" in flag or
@@ -436,7 +437,7 @@ class Service(object):
 self.timers.append(periodic)
 
 def _create_service_ref(self, context):
-zone = FLAGS.node_availability_zone
+zone = CONF.node_availability_zone
 service_ref = db.service_create(context,
 {'host': self.host,
 'binary': self.binary,
@@ -455,30 +456,30 @@ class Service(object):
 periodic_fuzzy_delay=None):
 """Instantiates class and passes back application object.
 
-:param host: defaults to FLAGS.host
+:param host: defaults to CONF.host
 :param binary: defaults to basename of executable
 :param topic: defaults to bin_name - 'nova-' part
-:param manager: defaults to FLAGS.<topic>_manager
-:param report_interval: defaults to FLAGS.report_interval
-:param periodic_interval: defaults to FLAGS.periodic_interval
-:param periodic_fuzzy_delay: defaults to FLAGS.periodic_fuzzy_delay
+:param manager: defaults to CONF.<topic>_manager
+:param report_interval: defaults to CONF.report_interval
+:param periodic_interval: defaults to CONF.periodic_interval
+:param periodic_fuzzy_delay: defaults to CONF.periodic_fuzzy_delay
 
 """
 if not host:
-host = FLAGS.host
+host = CONF.host
 if not binary:
 binary = os.path.basename(inspect.stack()[-1][1])
 if not topic:
 topic = binary.rpartition('nova-')[2]
 if not manager:
-manager = FLAGS.get('%s_manager' %
+manager = CONF.get('%s_manager' %
 binary.rpartition('nova-')[2], None)
 if report_interval is None:
-report_interval = FLAGS.report_interval
+report_interval = CONF.report_interval
 if periodic_interval is None:
-periodic_interval = FLAGS.periodic_interval
+periodic_interval = CONF.periodic_interval
 if periodic_fuzzy_delay is None:
-periodic_fuzzy_delay = FLAGS.periodic_fuzzy_delay
+periodic_fuzzy_delay = CONF.periodic_fuzzy_delay
 service_obj = cls(host, binary, topic, manager,
 report_interval=report_interval,
 periodic_interval=periodic_interval,
@@ -523,7 +524,7 @@ class Service(object):
 def report_state(self):
 """Update the state of this service in the datastore."""
 ctxt = context.get_admin_context()
-zone = FLAGS.node_availability_zone
+zone = CONF.node_availability_zone
 state_catalog = {}
 try:
 try:
@@ -568,9 +569,9 @@ class WSGIService(object):
 self.manager = self._get_manager()
 self.loader = loader or wsgi.Loader()
 self.app = self.loader.load_app(name)
-self.host = getattr(FLAGS, '%s_listen' % name, "0.0.0.0")
-self.port = getattr(FLAGS, '%s_listen_port' % name, 0)
-self.workers = getattr(FLAGS, '%s_workers' % name, None)
+self.host = getattr(CONF, '%s_listen' % name, "0.0.0.0")
+self.port = getattr(CONF, '%s_listen_port' % name, 0)
+self.workers = getattr(CONF, '%s_workers' % name, None)
 self.server = wsgi.Server(name,
 self.app,
 host=self.host,
@@ -589,10 +590,10 @@ class WSGIService(object):
 
 """
 fl = '%s_manager' % self.name
-if not fl in FLAGS:
+if not fl in CONF:
 return None
 
-manager_class_name = FLAGS.get(fl, None)
+manager_class_name = CONF.get(fl, None)
 if not manager_class_name:
 return None
 
@@ -47,6 +47,7 @@ from eventlet import greenthread
 from eventlet import semaphore
 import netaddr
 
+from nova import config
 from nova import exception
 from nova import flags
 from nova.openstack.common import cfg
@@ -57,9 +58,8 @@ from nova.openstack.common import timeutils
 
 
 LOG = logging.getLogger(__name__)
-FLAGS = flags.FLAGS
-
-FLAGS.register_opt(
+CONF = config.CONF
+CONF.register_opt(
 cfg.BoolOpt('disable_process_locking', default=False,
 help='Whether to disable inter-process locks'))
 
@@ -171,7 +171,7 @@ def execute(*cmd, **kwargs):
 'to utils.execute: %r') % kwargs)
 
 if run_as_root and os.geteuid() != 0:
-cmd = ['sudo', 'nova-rootwrap', FLAGS.rootwrap_config] + list(cmd)
+cmd = ['sudo', 'nova-rootwrap', CONF.rootwrap_config] + list(cmd)
 
 cmd = map(str, cmd)
 
@@ -330,7 +330,7 @@ def last_completed_audit_period(unit=None, before=None):
 The begin timestamp of this audit period is the same as the
 end of the previous."""
 if not unit:
-unit = FLAGS.instance_usage_audit_period
+unit = CONF.instance_usage_audit_period
 
 offset = 0
 if '@' in unit:
@@ -483,7 +483,7 @@ class LazyPluggable(object):
 
 def __get_backend(self):
 if not self.__backend:
-backend_name = FLAGS[self.__pivot]
+backend_name = CONF[self.__pivot]
 if backend_name not in self.__backends:
 msg = _('Invalid backend: %s') % backend_name
 raise exception.NovaException(msg)
@@ -851,7 +851,7 @@ def monkey_patch():
 this function patches a decorator
 for all functions in specified modules.
 You can set decorators for each modules
-using FLAGS.monkey_patch_modules.
+using CONF.monkey_patch_modules.
 The format is "Module path:Decorator function".
 Example: 'nova.api.ec2.cloud:nova.notifier.api.notify_decorator'
 
@@ -861,11 +861,11 @@ def monkey_patch():
 name - name of the function
 function - object of the function
 """
-# If FLAGS.monkey_patch is not True, this function do nothing.
-if not FLAGS.monkey_patch:
+# If CONF.monkey_patch is not True, this function do nothing.
+if not CONF.monkey_patch:
 return
 # Get list of modules and decorators
-for module_and_decorator in FLAGS.monkey_patch_modules:
+for module_and_decorator in CONF.monkey_patch_modules:
 module, decorator_name = module_and_decorator.split(':')
 # import decorator function
 decorator = importutils.import_class(decorator_name)
@@ -913,7 +913,7 @@ def generate_glance_url():
 """Generate the URL to glance."""
 # TODO(jk0): This will eventually need to take SSL into consideration
 # when supported in glance.
-return "http://%s:%d" % (FLAGS.glance_host, FLAGS.glance_port)
+return "http://%s:%d" % (CONF.glance_host, CONF.glance_port)
 
 
 def generate_image_url(image_ref):
@@ -1044,7 +1044,7 @@ def service_is_up(service):
 last_heartbeat = service['updated_at'] or service['created_at']
 # Timestamps in DB are UTC.
 elapsed = total_seconds(timeutils.utcnow() - last_heartbeat)
-return abs(elapsed) <= FLAGS.service_down_time
+return abs(elapsed) <= CONF.service_down_time
 
 
 def generate_mac_address():
@@ -18,6 +18,7 @@
 
 """Module for VNC Proxying."""
 
+from nova import config
 from nova import flags
 from nova.openstack.common import cfg
 
@@ -46,5 +47,5 @@ vnc_opts = [
 help='keymap for vnc'),
 ]
 
-FLAGS = flags.FLAGS
-FLAGS.register_opts(vnc_opts)
+CONF = config.CONF
+CONF.register_opts(vnc_opts)
@@ -26,6 +26,7 @@ import eventlet.green
 import eventlet.greenio
 import eventlet.wsgi
 
+from nova import config
 from nova.consoleauth import rpcapi as consoleauth_rpcapi
 from nova import context
 from nova import flags
@@ -46,8 +47,8 @@ xvp_proxy_opts = [
 help='Address that the XCP VNC proxy should bind to'),
 ]
 
-FLAGS = flags.FLAGS
-FLAGS.register_opts(xvp_proxy_opts)
+CONF = config.CONF
+CONF.register_opts(xvp_proxy_opts)
 
 
 class XCPVNCProxy(object):
@@ -180,5 +181,5 @@ def get_wsgi_server():
 return wsgi.Server("XCP VNC Proxy",
 XCPVNCProxy(),
 protocol=SafeHttpProtocol,
-host=FLAGS.xvpvncproxy_host,
-port=FLAGS.xvpvncproxy_port)
+host=CONF.xvpvncproxy_host,
+port=CONF.xvpvncproxy_port)
@@ -18,11 +18,12 @@
 
 # Importing full names to not pollute the namespace and cause possible
 # collisions with use of 'from nova.volume import <foo>' elsewhere.
+import nova.config
 import nova.flags
 import nova.openstack.common.importutils
 
 
 def API():
 importutils = nova.openstack.common.importutils
-cls = importutils.import_class(nova.flags.FLAGS.volume_api_class)
+cls = importutils.import_class(nova.config.CONF.volume_api_class)
 return cls()
@@ -24,6 +24,7 @@ Handles all requests relating to volumes + cinder.
 from cinderclient import service_catalog
 from cinderclient.v1 import client as cinder_client
 
+from nova import config
 from nova.db import base
 from nova import exception
 from nova import flags
@@ -42,8 +43,8 @@ cinder_opts = [
 'endpoint e.g. http://localhost:8776/v1/%(project_id)s'),
 ]
 
-FLAGS = flags.FLAGS
-FLAGS.register_opts(cinder_opts)
+CONF = config.CONF
+CONF.register_opts(cinder_opts)
 
 LOG = logging.getLogger(__name__)
 
@@ -56,10 +57,10 @@ def cinderclient(context):
 'access': {'serviceCatalog': context.service_catalog or {}}
 }
 sc = service_catalog.ServiceCatalog(compat_catalog)
-if FLAGS.cinder_endpoint_template:
-url = FLAGS.cinder_endpoint_template % context.to_dict()
+if CONF.cinder_endpoint_template:
+url = CONF.cinder_endpoint_template % context.to_dict()
 else:
-info = FLAGS.cinder_catalog_info
+info = CONF.cinder_catalog_info
 service_type, service_name, endpoint_type = info.split(':')
 url = sc.url_for(service_type=service_type,
 service_name=service_name,
@@ -25,6 +25,7 @@ import tempfile
 import time
 import urllib
 
+from nova import config
 from nova import exception
 from nova import flags
 from nova.openstack.common import cfg
@@ -73,8 +74,8 @@ volume_opts = [
 'driver does not write them directly to the volume'),
 ]
 
-FLAGS = flags.FLAGS
-FLAGS.register_opts(volume_opts)
+CONF = config.CONF
+CONF.register_opts(volume_opts)
 
 
 class VolumeDriver(object):
@@ -98,7 +99,7 @@ class VolumeDriver(object):
 return True
 except exception.ProcessExecutionError:
 tries = tries + 1
-if tries >= FLAGS.num_shell_tries:
+if tries >= CONF.num_shell_tries:
 raise
 LOG.exception(_("Recovering from a failed execute. "
 "Try number %s"), tries)
@@ -109,14 +110,14 @@ class VolumeDriver(object):
 out, err = self._execute('vgs', '--noheadings', '-o', 'name',
 run_as_root=True)
 volume_groups = out.split()
-if not FLAGS.volume_group in volume_groups:
+if not CONF.volume_group in volume_groups:
 exception_message = (_("volume group %s doesn't exist")
-% FLAGS.volume_group)
+% CONF.volume_group)
 raise exception.VolumeBackendAPIException(data=exception_message)
 
 def _create_volume(self, volume_name, sizestr):
 self._try_execute('lvcreate', '-L', sizestr, '-n',
-volume_name, FLAGS.volume_group, run_as_root=True)
+volume_name, CONF.volume_group, run_as_root=True)
 
 def _copy_volume(self, srcstr, deststr, size_in_g):
 # Use O_DIRECT to avoid thrashing the system buffer cache
@@ -135,7 +136,7 @@ class VolumeDriver(object):
 *direct_flags, run_as_root=True)
 
 def _volume_not_present(self, volume_name):
-path_name = '%s/%s' % (FLAGS.volume_group, volume_name)
+path_name = '%s/%s' % (CONF.volume_group, volume_name)
 try:
 self._try_execute('lvdisplay', path_name, run_as_root=True)
 except Exception as e:
@@ -153,7 +154,7 @@ class VolumeDriver(object):
 self._try_execute('dmsetup', 'remove', '-f', dev_path,
 run_as_root=True)
 self._try_execute('lvremove', '-f', "%s/%s" %
-(FLAGS.volume_group,
+(CONF.volume_group,
 self._escape_snapshot(volume['name'])),
 run_as_root=True)
 
@@ -190,7 +191,7 @@ class VolumeDriver(object):
 # deleting derived snapshots. Can we do something fancy?
 out, err = self._execute('lvdisplay', '--noheading',
 '-C', '-o', 'Attr',
-'%s/%s' % (FLAGS.volume_group,
+'%s/%s' % (CONF.volume_group,
 volume['name']),
 run_as_root=True)
 # fake_execute returns None resulting unit test error
@@ -203,7 +204,7 @@ class VolumeDriver(object):
 
 def create_snapshot(self, snapshot):
 """Creates a snapshot."""
-orig_lv_name = "%s/%s" % (FLAGS.volume_group, snapshot['volume_name'])
+orig_lv_name = "%s/%s" % (CONF.volume_group, snapshot['volume_name'])
 self._try_execute('lvcreate', '-L',
 self._sizestr(snapshot['volume_size']),
 '--name', self._escape_snapshot(snapshot['name']),
@ -221,7 +222,7 @@ class VolumeDriver(object):
|
|||||||
|
|
||||||
def local_path(self, volume):
|
def local_path(self, volume):
|
||||||
# NOTE(vish): stops deprecation warning
|
# NOTE(vish): stops deprecation warning
|
||||||
escaped_group = FLAGS.volume_group.replace('-', '--')
|
escaped_group = CONF.volume_group.replace('-', '--')
|
||||||
escaped_name = self._escape_snapshot(volume['name']).replace('-', '--')
|
escaped_name = self._escape_snapshot(volume['name']).replace('-', '--')
|
||||||
return "/dev/mapper/%s-%s" % (escaped_group, escaped_name)
|
return "/dev/mapper/%s-%s" % (escaped_group, escaped_name)
|
||||||
|
|
||||||
@ -327,8 +328,8 @@ class ISCSIDriver(VolumeDriver):
|
|||||||
else:
|
else:
|
||||||
iscsi_target = 1 # dummy value when using TgtAdm
|
iscsi_target = 1 # dummy value when using TgtAdm
|
||||||
|
|
||||||
iscsi_name = "%s%s" % (FLAGS.iscsi_target_prefix, volume['name'])
|
iscsi_name = "%s%s" % (CONF.iscsi_target_prefix, volume['name'])
|
||||||
volume_path = "/dev/%s/%s" % (FLAGS.volume_group, volume['name'])
|
volume_path = "/dev/%s/%s" % (CONF.volume_group, volume['name'])
|
||||||
|
|
||||||
# NOTE(jdg): For TgtAdm case iscsi_name is the ONLY param we need
|
# NOTE(jdg): For TgtAdm case iscsi_name is the ONLY param we need
|
||||||
# should clean this all up at some point in the future
|
# should clean this all up at some point in the future
|
||||||
@ -344,11 +345,11 @@ class ISCSIDriver(VolumeDriver):
|
|||||||
if not isinstance(self.tgtadm, iscsi.TgtAdm):
|
if not isinstance(self.tgtadm, iscsi.TgtAdm):
|
||||||
host_iscsi_targets = self.db.iscsi_target_count_by_host(context,
|
host_iscsi_targets = self.db.iscsi_target_count_by_host(context,
|
||||||
host)
|
host)
|
||||||
if host_iscsi_targets >= FLAGS.iscsi_num_targets:
|
if host_iscsi_targets >= CONF.iscsi_num_targets:
|
||||||
return
|
return
|
||||||
|
|
||||||
# NOTE(vish): Target ids start at 1, not 0.
|
# NOTE(vish): Target ids start at 1, not 0.
|
||||||
for target_num in xrange(1, FLAGS.iscsi_num_targets + 1):
|
for target_num in xrange(1, CONF.iscsi_num_targets + 1):
|
||||||
target = {'host': host, 'target_num': target_num}
|
target = {'host': host, 'target_num': target_num}
|
||||||
self.db.iscsi_target_create_safe(context, target)
|
self.db.iscsi_target_create_safe(context, target)
|
||||||
|
|
||||||
@ -356,8 +357,8 @@ class ISCSIDriver(VolumeDriver):
|
|||||||
"""Creates an export for a logical volume."""
|
"""Creates an export for a logical volume."""
|
||||||
#BOOKMARK(jdg)
|
#BOOKMARK(jdg)
|
||||||
|
|
||||||
iscsi_name = "%s%s" % (FLAGS.iscsi_target_prefix, volume['name'])
|
iscsi_name = "%s%s" % (CONF.iscsi_target_prefix, volume['name'])
|
||||||
volume_path = "/dev/%s/%s" % (FLAGS.volume_group, volume['name'])
|
volume_path = "/dev/%s/%s" % (CONF.volume_group, volume['name'])
|
||||||
|
|
||||||
model_update = {}
|
model_update = {}
|
||||||
|
|
||||||
@ -380,7 +381,7 @@ class ISCSIDriver(VolumeDriver):
|
|||||||
0,
|
0,
|
||||||
volume_path)
|
volume_path)
|
||||||
model_update['provider_location'] = _iscsi_location(
|
model_update['provider_location'] = _iscsi_location(
|
||||||
FLAGS.iscsi_ip_address, tid, iscsi_name, lun)
|
CONF.iscsi_ip_address, tid, iscsi_name, lun)
|
||||||
return model_update
|
return model_update
|
||||||
|
|
||||||
def remove_export(self, context, volume):
|
def remove_export(self, context, volume):
|
||||||
@ -428,7 +429,7 @@ class ISCSIDriver(VolumeDriver):
|
|||||||
'-t', 'sendtargets', '-p', volume['host'],
|
'-t', 'sendtargets', '-p', volume['host'],
|
||||||
run_as_root=True)
|
run_as_root=True)
|
||||||
for target in out.splitlines():
|
for target in out.splitlines():
|
||||||
if FLAGS.iscsi_ip_address in target and volume_name in target:
|
if CONF.iscsi_ip_address in target and volume_name in target:
|
||||||
return target
|
return target
|
||||||
return None
|
return None
|
||||||
|
|
||||||
@ -480,7 +481,7 @@ class ISCSIDriver(VolumeDriver):
|
|||||||
try:
|
try:
|
||||||
properties['target_lun'] = int(results[2])
|
properties['target_lun'] = int(results[2])
|
||||||
except (IndexError, ValueError):
|
except (IndexError, ValueError):
|
||||||
if FLAGS.iscsi_helper == 'tgtadm':
|
if CONF.iscsi_helper == 'tgtadm':
|
||||||
properties['target_lun'] = 1
|
properties['target_lun'] = 1
|
||||||
else:
|
else:
|
||||||
properties['target_lun'] = 0
|
properties['target_lun'] = 0
|
||||||
@ -542,9 +543,9 @@ class ISCSIDriver(VolumeDriver):
|
|||||||
def check_for_export(self, context, volume_id):
|
def check_for_export(self, context, volume_id):
|
||||||
"""Make sure volume is exported."""
|
"""Make sure volume is exported."""
|
||||||
vol_uuid_file = 'volume-%s' % volume_id
|
vol_uuid_file = 'volume-%s' % volume_id
|
||||||
volume_path = os.path.join(FLAGS.volumes_dir, vol_uuid_file)
|
volume_path = os.path.join(CONF.volumes_dir, vol_uuid_file)
|
||||||
if os.path.isfile(volume_path):
|
if os.path.isfile(volume_path):
|
||||||
iqn = '%s%s' % (FLAGS.iscsi_target_prefix,
|
iqn = '%s%s' % (CONF.iscsi_target_prefix,
|
||||||
vol_uuid_file)
|
vol_uuid_file)
|
||||||
else:
|
else:
|
||||||
raise exception.PersistentVolumeFileNotFound(volume_id=volume_id)
|
raise exception.PersistentVolumeFileNotFound(volume_id=volume_id)
|
||||||
@ -614,9 +615,9 @@ class RBDDriver(VolumeDriver):
|
|||||||
"""Returns an error if prerequisites aren't met"""
|
"""Returns an error if prerequisites aren't met"""
|
||||||
(stdout, stderr) = self._execute('rados', 'lspools')
|
(stdout, stderr) = self._execute('rados', 'lspools')
|
||||||
pools = stdout.split("\n")
|
pools = stdout.split("\n")
|
||||||
if not FLAGS.rbd_pool in pools:
|
if not CONF.rbd_pool in pools:
|
||||||
exception_message = (_("rbd has no pool %s") %
|
exception_message = (_("rbd has no pool %s") %
|
||||||
FLAGS.rbd_pool)
|
CONF.rbd_pool)
|
||||||
raise exception.VolumeBackendAPIException(data=exception_message)
|
raise exception.VolumeBackendAPIException(data=exception_message)
|
||||||
|
|
||||||
def _supports_layering(self):
|
def _supports_layering(self):
|
||||||
@ -630,7 +631,7 @@ class RBDDriver(VolumeDriver):
|
|||||||
else:
|
else:
|
||||||
size = int(volume['size']) * 1024
|
size = int(volume['size']) * 1024
|
||||||
args = ['rbd', 'create',
|
args = ['rbd', 'create',
|
||||||
'--pool', FLAGS.rbd_pool,
|
'--pool', CONF.rbd_pool,
|
||||||
'--size', size,
|
'--size', size,
|
||||||
volume['name']]
|
volume['name']]
|
||||||
if self._supports_layering():
|
if self._supports_layering():
|
||||||
@ -642,19 +643,19 @@ class RBDDriver(VolumeDriver):
|
|||||||
'--pool', src_pool,
|
'--pool', src_pool,
|
||||||
'--image', src_image,
|
'--image', src_image,
|
||||||
'--snap', src_snap,
|
'--snap', src_snap,
|
||||||
'--dest-pool', FLAGS.rbd_pool,
|
'--dest-pool', CONF.rbd_pool,
|
||||||
'--dest', volume['name'])
|
'--dest', volume['name'])
|
||||||
|
|
||||||
def _resize(self, volume):
|
def _resize(self, volume):
|
||||||
size = int(volume['size']) * 1024
|
size = int(volume['size']) * 1024
|
||||||
self._try_execute('rbd', 'resize',
|
self._try_execute('rbd', 'resize',
|
||||||
'--pool', FLAGS.rbd_pool,
|
'--pool', CONF.rbd_pool,
|
||||||
'--image', volume['name'],
|
'--image', volume['name'],
|
||||||
'--size', size)
|
'--size', size)
|
||||||
|
|
||||||
def create_volume_from_snapshot(self, volume, snapshot):
|
def create_volume_from_snapshot(self, volume, snapshot):
|
||||||
"""Creates a volume from a snapshot."""
|
"""Creates a volume from a snapshot."""
|
||||||
self._clone(volume, FLAGS.rbd_pool,
|
self._clone(volume, CONF.rbd_pool,
|
||||||
snapshot['volume_name'], snapshot['name'])
|
snapshot['volume_name'], snapshot['name'])
|
||||||
if int(volume['size']):
|
if int(volume['size']):
|
||||||
self._resize(volume)
|
self._resize(volume)
|
||||||
@ -662,23 +663,23 @@ class RBDDriver(VolumeDriver):
|
|||||||
def delete_volume(self, volume):
|
def delete_volume(self, volume):
|
||||||
"""Deletes a logical volume."""
|
"""Deletes a logical volume."""
|
||||||
stdout, _ = self._execute('rbd', 'snap', 'ls',
|
stdout, _ = self._execute('rbd', 'snap', 'ls',
|
||||||
'--pool', FLAGS.rbd_pool,
|
'--pool', CONF.rbd_pool,
|
||||||
volume['name'])
|
volume['name'])
|
||||||
if stdout.count('\n') > 1:
|
if stdout.count('\n') > 1:
|
||||||
raise exception.VolumeIsBusy(volume_name=volume['name'])
|
raise exception.VolumeIsBusy(volume_name=volume['name'])
|
||||||
self._try_execute('rbd', 'rm',
|
self._try_execute('rbd', 'rm',
|
||||||
'--pool', FLAGS.rbd_pool,
|
'--pool', CONF.rbd_pool,
|
||||||
volume['name'])
|
volume['name'])
|
||||||
|
|
||||||
def create_snapshot(self, snapshot):
|
def create_snapshot(self, snapshot):
|
||||||
"""Creates an rbd snapshot"""
|
"""Creates an rbd snapshot"""
|
||||||
self._try_execute('rbd', 'snap', 'create',
|
self._try_execute('rbd', 'snap', 'create',
|
||||||
'--pool', FLAGS.rbd_pool,
|
'--pool', CONF.rbd_pool,
|
||||||
'--snap', snapshot['name'],
|
'--snap', snapshot['name'],
|
||||||
snapshot['volume_name'])
|
snapshot['volume_name'])
|
||||||
if self._supports_layering():
|
if self._supports_layering():
|
||||||
self._try_execute('rbd', 'snap', 'protect',
|
self._try_execute('rbd', 'snap', 'protect',
|
||||||
'--pool', FLAGS.rbd_pool,
|
'--pool', CONF.rbd_pool,
|
||||||
'--snap', snapshot['name'],
|
'--snap', snapshot['name'],
|
||||||
snapshot['volume_name'])
|
snapshot['volume_name'])
|
||||||
|
|
||||||
@ -687,13 +688,13 @@ class RBDDriver(VolumeDriver):
|
|||||||
if self._supports_layering():
|
if self._supports_layering():
|
||||||
try:
|
try:
|
||||||
self._try_execute('rbd', 'snap', 'unprotect',
|
self._try_execute('rbd', 'snap', 'unprotect',
|
||||||
'--pool', FLAGS.rbd_pool,
|
'--pool', CONF.rbd_pool,
|
||||||
'--snap', snapshot['name'],
|
'--snap', snapshot['name'],
|
||||||
snapshot['volume_name'])
|
snapshot['volume_name'])
|
||||||
except exception.ProcessExecutionError:
|
except exception.ProcessExecutionError:
|
||||||
raise exception.SnapshotIsBusy(snapshot_name=snapshot['name'])
|
raise exception.SnapshotIsBusy(snapshot_name=snapshot['name'])
|
||||||
self._try_execute('rbd', 'snap', 'rm',
|
self._try_execute('rbd', 'snap', 'rm',
|
||||||
'--pool', FLAGS.rbd_pool,
|
'--pool', CONF.rbd_pool,
|
||||||
'--snap', snapshot['name'],
|
'--snap', snapshot['name'],
|
||||||
snapshot['volume_name'])
|
snapshot['volume_name'])
|
||||||
|
|
||||||
@ -701,7 +702,7 @@ class RBDDriver(VolumeDriver):
|
|||||||
"""Returns the path of the rbd volume."""
|
"""Returns the path of the rbd volume."""
|
||||||
# This is the same as the remote path
|
# This is the same as the remote path
|
||||||
# since qemu accesses it directly.
|
# since qemu accesses it directly.
|
||||||
return "rbd:%s/%s" % (FLAGS.rbd_pool, volume['name'])
|
return "rbd:%s/%s" % (CONF.rbd_pool, volume['name'])
|
||||||
|
|
||||||
def ensure_export(self, context, volume):
|
def ensure_export(self, context, volume):
|
||||||
"""Synchronously recreates an export for a logical volume."""
|
"""Synchronously recreates an export for a logical volume."""
|
||||||
@ -723,11 +724,11 @@ class RBDDriver(VolumeDriver):
|
|||||||
return {
|
return {
|
||||||
'driver_volume_type': 'rbd',
|
'driver_volume_type': 'rbd',
|
||||||
'data': {
|
'data': {
|
||||||
'name': '%s/%s' % (FLAGS.rbd_pool, volume['name']),
|
'name': '%s/%s' % (CONF.rbd_pool, volume['name']),
|
||||||
'auth_enabled': FLAGS.rbd_secret_uuid is not None,
|
'auth_enabled': CONF.rbd_secret_uuid is not None,
|
||||||
'auth_username': FLAGS.rbd_user,
|
'auth_username': CONF.rbd_user,
|
||||||
'secret_type': 'ceph',
|
'secret_type': 'ceph',
|
||||||
'secret_uuid': FLAGS.rbd_secret_uuid,
|
'secret_uuid': CONF.rbd_secret_uuid,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -787,17 +788,17 @@ class RBDDriver(VolumeDriver):
|
|||||||
# TODO(jdurgin): replace with librbd
|
# TODO(jdurgin): replace with librbd
|
||||||
# this is a temporary hack, since rewriting this driver
|
# this is a temporary hack, since rewriting this driver
|
||||||
# to use librbd would take too long
|
# to use librbd would take too long
|
||||||
if FLAGS.volume_tmp_dir and not os.exists(FLAGS.volume_tmp_dir):
|
if CONF.volume_tmp_dir and not os.exists(CONF.volume_tmp_dir):
|
||||||
os.makedirs(FLAGS.volume_tmp_dir)
|
os.makedirs(CONF.volume_tmp_dir)
|
||||||
|
|
||||||
with tempfile.NamedTemporaryFile(dir=FLAGS.volume_tmp_dir) as tmp:
|
with tempfile.NamedTemporaryFile(dir=CONF.volume_tmp_dir) as tmp:
|
||||||
image_service.download(context, image_id, tmp)
|
image_service.download(context, image_id, tmp)
|
||||||
# import creates the image, so we must remove it first
|
# import creates the image, so we must remove it first
|
||||||
self._try_execute('rbd', 'rm',
|
self._try_execute('rbd', 'rm',
|
||||||
'--pool', FLAGS.rbd_pool,
|
'--pool', CONF.rbd_pool,
|
||||||
volume['name'])
|
volume['name'])
|
||||||
self._try_execute('rbd', 'import',
|
self._try_execute('rbd', 'import',
|
||||||
'--pool', FLAGS.rbd_pool,
|
'--pool', CONF.rbd_pool,
|
||||||
tmp.name, volume['name'])
|
tmp.name, volume['name'])
|
||||||
|
|
||||||
|
|
||||||
@ -950,4 +951,4 @@ class LoggingVolumeDriver(VolumeDriver):
|
|||||||
|
|
||||||
|
|
||||||
def _iscsi_location(ip, target, iqn, lun=None):
|
def _iscsi_location(ip, target, iqn, lun=None):
|
||||||
return "%s:%s,%s %s %s" % (ip, FLAGS.iscsi_port, target, iqn, lun)
|
return "%s:%s,%s %s %s" % (ip, CONF.iscsi_port, target, iqn, lun)
|
||||||
|
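_iscsi_location() at the end of this file builds the provider_location string stored for each exported volume. A worked example of the resulting format; every concrete value below is invented for illustration, with the port merely standing in for CONF.iscsi_port:

# Worked example of the string _iscsi_location() produces; all values here
# are invented for illustration.
iscsi_port = 3260                      # stands in for CONF.iscsi_port
ip = '192.168.0.10'
target = 1
iqn = 'iqn.2010-10.org.openstack:volume-0001'
lun = 0

provider_location = "%s:%s,%s %s %s" % (ip, iscsi_port, target, iqn, lun)
print(provider_location)
# -> 192.168.0.10:3260,1 iqn.2010-10.org.openstack:volume-0001 0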
@@ -21,6 +21,7 @@ Helper code for the iSCSI volume driver.
 """
 import os
 
+from nova import config
 from nova import exception
 from nova import flags
 from nova.openstack.common import cfg
@@ -39,8 +40,8 @@ iscsi_helper_opt = [
                help='Volume configuration file storage directory'),
     ]
 
-FLAGS = flags.FLAGS
-FLAGS.register_opts(iscsi_helper_opt)
+CONF = config.CONF
+CONF.register_opts(iscsi_helper_opt)
 
 
 class TargetAdmin(object):
@@ -110,7 +111,7 @@ class TgtAdm(TargetAdmin):
         # Note(jdg) tid and lun aren't used by TgtAdm but remain for
         # compatibility
 
-        fileutils.ensure_tree(FLAGS.volumes_dir)
+        fileutils.ensure_tree(CONF.volumes_dir)
 
         vol_id = name.split(':')[1]
         volume_conf = """
@@ -120,7 +121,7 @@ class TgtAdm(TargetAdmin):
                 """ % (name, path)
 
         LOG.info(_('Creating volume: %s') % vol_id)
-        volumes_dir = FLAGS.volumes_dir
+        volumes_dir = CONF.volumes_dir
         volume_path = os.path.join(volumes_dir, vol_id)
 
         f = open(volume_path, 'w+')
@@ -140,7 +141,7 @@ class TgtAdm(TargetAdmin):
             os.unlink(volume_path)
             raise exception.ISCSITargetCreateFailed(volume_id=vol_id)
 
-        iqn = '%s%s' % (FLAGS.iscsi_target_prefix, vol_id)
+        iqn = '%s%s' % (CONF.iscsi_target_prefix, vol_id)
         tid = self._get_target(iqn)
         if tid is None:
             LOG.error(_("Failed to create iscsi target for volume "
@@ -153,9 +154,9 @@ class TgtAdm(TargetAdmin):
     def remove_iscsi_target(self, tid, lun, vol_id, **kwargs):
         LOG.info(_('Removing volume: %s') % vol_id)
         vol_uuid_file = 'volume-%s' % vol_id
-        volume_path = os.path.join(FLAGS.volumes_dir, vol_uuid_file)
+        volume_path = os.path.join(CONF.volumes_dir, vol_uuid_file)
         if os.path.isfile(volume_path):
-            iqn = '%s%s' % (FLAGS.iscsi_target_prefix,
+            iqn = '%s%s' % (CONF.iscsi_target_prefix,
                             vol_uuid_file)
         else:
             raise exception.ISCSITargetRemoveFailed(volume_id=vol_id)
@@ -228,7 +229,7 @@ class IetAdm(TargetAdmin):
 
 
 def get_target_admin():
-    if FLAGS.iscsi_helper == 'tgtadm':
+    if CONF.iscsi_helper == 'tgtadm':
         return TgtAdm()
     else:
         return IetAdm()
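The get_target_admin() hunk above is representative of the whole series: the branch is still keyed on the iscsi_helper string option, only read through CONF. A rough usage sketch, assuming the helper module is importable as nova.volume.iscsi (as the driver's iscsi.TgtAdm references suggest) and that set_override merely stands in for setting the value in nova.conf:

# Rough usage sketch; the set_override call stands in for configuring
# iscsi_helper in nova.conf and is for illustration only.
from nova import config
from nova.volume import iscsi

CONF = config.CONF
CONF.set_override('iscsi_helper', 'tgtadm')

admin = iscsi.get_target_admin()
print(type(admin).__name__)   # expected: TgtAdm while iscsi_helper is 'tgtadm'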
nova/wsgi.py
@@ -30,6 +30,7 @@ import routes.middleware
 import webob.dec
 import webob.exc
 
+from nova import config
 from nova import exception
 from nova import flags
 from nova.openstack.common import cfg
@@ -44,8 +45,8 @@ wsgi_opts = [
                'into it: client_ip, date_time, request_line, status_code, '
                'body_length, wall_seconds.')
     ]
-FLAGS = flags.FLAGS
-FLAGS.register_opts(wsgi_opts)
+CONF = config.CONF
+CONF.register_opts(wsgi_opts)
 
 LOG = logging.getLogger(__name__)
 
@@ -95,7 +96,7 @@ class Server(object):
                              protocol=self._protocol,
                              custom_pool=self._pool,
                              log=self._wsgi_logger,
-                             log_format=FLAGS.wsgi_log_format)
+                             log_format=CONF.wsgi_log_format)
 
     def stop(self):
         """Stop this server.
@@ -362,11 +363,11 @@ class Loader(object):
         :returns: None
 
         """
-        config_path = config_path or FLAGS.api_paste_config
+        config_path = config_path or CONF.api_paste_config
         if os.path.exists(config_path):
             self.config_path = config_path
         else:
-            self.config_path = FLAGS.find_file(config_path)
+            self.config_path = CONF.find_file(config_path)
         if not self.config_path:
             raise exception.ConfigNotFound(path=config_path)
 
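The Loader hunk shows the lookup order for the paste configuration: an explicit or existing path wins, otherwise the bare file name is resolved through the config object's search directories. A compressed restatement of that logic, assuming CONF has already been populated from nova.conf:

# Compressed restatement of the Loader lookup shown above; assumes CONF has
# already been populated from nova.conf.
import os

from nova import config
from nova import exception

CONF = config.CONF


def resolve_paste_config(config_path=None):
    config_path = config_path or CONF.api_paste_config
    if os.path.exists(config_path):
        return config_path
    found = CONF.find_file(config_path)   # searches the configured directories
    if not found:
        raise exception.ConfigNotFound(path=config_path)
    return found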
@@ -33,7 +33,6 @@ from nova import utils
 from nova.virt.xenapi import driver as xenapi_driver
 from nova.virt.xenapi import vm_utils
 
-FLAGS = flags.FLAGS
 destroy_opts = [
     cfg.BoolOpt('all_cached',
                 default=False,
@@ -44,7 +43,8 @@ destroy_opts = [
                 help='Don\'t actually delete the VDIs.')
     ]
 
-FLAGS.register_cli_opts(destroy_opts)
+CONF = config.CONF
+CONF.register_cli_opts(destroy_opts)
 
 
 def main():
@@ -56,8 +56,8 @@ def main():
 
     sr_ref = vm_utils.safe_find_sr(session)
     destroyed = vm_utils.destroy_cached_images(
-        session, sr_ref, all_cached=FLAGS.all_cached,
-        dry_run=FLAGS.dry_run)
+        session, sr_ref, all_cached=CONF.all_cached,
+        dry_run=CONF.dry_run)
 
     if '--verbose' in sys.argv:
         print '\n'.join(destroyed)
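The xenserver tool drops its private FLAGS alias and registers its CLI options on the global CONF instead. A hedged sketch of that pattern; the option help text and the config.parse_args call are assumptions about the surrounding boilerplate rather than lines taken from this diff:

# Hedged sketch of the CLI-option pattern used by the tool above; the help
# text and the parse_args call are assumptions, not taken from the diff.
import sys

from nova import config
from nova.openstack.common import cfg

example_cli_opts = [
    cfg.BoolOpt('all_cached',
                default=False,
                help='Destroy every cached image, not just unused ones'),
]

CONF = config.CONF
CONF.register_cli_opts(example_cli_opts)


def main():
    config.parse_args(sys.argv)   # assumed to parse nova.conf plus the CLI
    if CONF.all_cached:
        print('would destroy all cached images')


if __name__ == '__main__':
    main()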