PEP8 cleanup in nova/*, except for tests. There should be no functional changes here, just style changes to get violations down.

Eric Day 2010-10-21 17:15:21 -07:00
parent 7af25f1476
commit 8c6ef13804
24 changed files with 273 additions and 235 deletions
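
The hunks below are mechanical applications of a few recurring PEP8 rules: no padding spaces inside dict literals and no space before the colon, long statements split across continuation lines or intermediate variables to stay under 79 characters, at least two spaces before an inline comment, and two blank lines between top-level definitions. A minimal sketch of the resulting style (the function and parameter names here are illustrative, not taken from the diff):

def build_user_values(name, access_key, secret_key, is_admin):
    """Build a values dict in the post-cleanup style: no spaces inside
    the braces and no space before the colons."""
    return {'id': name,
            'access_key': access_key,
            'secret_key': secret_key,
            'is_admin': is_admin}


def get_user_id(db_api, context, uid):
    """Keep a long call under 79 characters by assigning the result to
    an intermediate variable instead of nesting the whole expression."""
    user = db_api.user_get(context, uid)
    return user['id']  # Two spaces separate code from an inline comment.
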

View File

@ -47,19 +47,23 @@ class DbDriver(object):
def get_user(self, uid):
"""Retrieve user by id"""
return self._db_user_to_auth_user(db.user_get(context.get_admin_context(), uid))
user = db.user_get(context.get_admin_context(), uid)
return self._db_user_to_auth_user(user)
def get_user_from_access_key(self, access):
"""Retrieve user by access key"""
return self._db_user_to_auth_user(db.user_get_by_access_key(context.get_admin_context(), access))
user = db.user_get_by_access_key(context.get_admin_context(), access)
return self._db_user_to_auth_user(user)
def get_project(self, pid):
"""Retrieve project by id"""
return self._db_project_to_auth_projectuser(db.project_get(context.get_admin_context(), pid))
project = db.project_get(context.get_admin_context(), pid)
return self._db_project_to_auth_projectuser(project)
def get_users(self):
"""Retrieve list of users"""
return [self._db_user_to_auth_user(user) for user in db.user_get_all(context.get_admin_context())]
return [self._db_user_to_auth_user(user)
for user in db.user_get_all(context.get_admin_context())]
def get_projects(self, uid=None):
"""Retrieve list of projects"""
@ -71,11 +75,10 @@ class DbDriver(object):
def create_user(self, name, access_key, secret_key, is_admin):
"""Create a user"""
values = { 'id' : name,
'access_key' : access_key,
'secret_key' : secret_key,
'is_admin' : is_admin
}
values = {'id': name,
'access_key': access_key,
'secret_key': secret_key,
'is_admin': is_admin}
try:
user_ref = db.user_create(context.get_admin_context(), values)
return self._db_user_to_auth_user(user_ref)
@ -83,18 +86,19 @@ class DbDriver(object):
raise exception.Duplicate('User %s already exists' % name)
def _db_user_to_auth_user(self, user_ref):
return { 'id' : user_ref['id'],
'name' : user_ref['id'],
'access' : user_ref['access_key'],
'secret' : user_ref['secret_key'],
'admin' : user_ref['is_admin'] }
return {'id': user_ref['id'],
'name': user_ref['id'],
'access': user_ref['access_key'],
'secret': user_ref['secret_key'],
'admin': user_ref['is_admin']}
def _db_project_to_auth_projectuser(self, project_ref):
return { 'id' : project_ref['id'],
'name' : project_ref['name'],
'project_manager_id' : project_ref['project_manager'],
'description' : project_ref['description'],
'member_ids' : [member['id'] for member in project_ref['members']] }
member_ids = [member['id'] for member in project_ref['members']]
return {'id': project_ref['id'],
'name': project_ref['name'],
'project_manager_id': project_ref['project_manager'],
'description': project_ref['description'],
'member_ids': member_ids}
def create_project(self, name, manager_uid,
description=None, member_uids=None):
@ -121,10 +125,10 @@ class DbDriver(object):
% member_uid)
members.add(member)
values = { 'id' : name,
'name' : name,
'project_manager' : manager['id'],
'description': description }
values = {'id': name,
'name': name,
'project_manager': manager['id'],
'description': description}
try:
project = db.project_create(context.get_admin_context(), values)
@ -244,4 +248,3 @@ class DbDriver(object):
if not project:
raise exception.NotFound('Project "%s" not found' % project_id)
return user, project

View File

@ -35,6 +35,7 @@ flags.DEFINE_integer('redis_port', 6379,
'Port that redis is running on.')
flags.DEFINE_integer('redis_db', 0, 'Multiple DB keeps tests away')
class Redis(object):
def __init__(self):
if hasattr(self.__class__, '_instance'):
@ -51,19 +52,19 @@ class Redis(object):
SCOPE_BASE = 0
SCOPE_ONELEVEL = 1 # not implemented
SCOPE_ONELEVEL = 1 # Not implemented
SCOPE_SUBTREE = 2
MOD_ADD = 0
MOD_DELETE = 1
MOD_REPLACE = 2
class NO_SUCH_OBJECT(Exception): # pylint: disable-msg=C0103
class NO_SUCH_OBJECT(Exception): # pylint: disable-msg=C0103
"""Duplicate exception class from real LDAP module."""
pass
class OBJECT_CLASS_VIOLATION(Exception): # pylint: disable-msg=C0103
class OBJECT_CLASS_VIOLATION(Exception): # pylint: disable-msg=C0103
"""Duplicate exception class from real LDAP module."""
pass
@ -251,8 +252,6 @@ class FakeLDAP(object):
return objects
@property
def __redis_prefix(self): # pylint: disable-msg=R0201
def __redis_prefix(self): # pylint: disable-msg=R0201
"""Get the prefix to use for all redis keys."""
return 'ldap:'

View File

@ -294,24 +294,26 @@ class LdapDriver(object):
def __find_dns(self, dn, query=None, scope=None):
"""Find dns by query"""
if scope is None: # one of the flags is 0!!
if scope is None:
# One of the flags is 0!
scope = self.ldap.SCOPE_SUBTREE
try:
res = self.conn.search_s(dn, scope, query)
except self.ldap.NO_SUCH_OBJECT:
return []
# just return the DNs
# Just return the DNs
return [dn for dn, _attributes in res]
def __find_objects(self, dn, query=None, scope=None):
"""Find objects by query"""
if scope is None: # one of the flags is 0!!
if scope is None:
# One of the flags is 0!
scope = self.ldap.SCOPE_SUBTREE
try:
res = self.conn.search_s(dn, scope, query)
except self.ldap.NO_SUCH_OBJECT:
return []
# just return the attributes
# Just return the attributes
return [attributes for dn, attributes in res]
def __find_role_dns(self, tree):
@ -480,6 +482,6 @@ class LdapDriver(object):
class FakeLdapDriver(LdapDriver):
"""Fake Ldap Auth driver"""
def __init__(self): # pylint: disable-msg=W0231
def __init__(self): # pylint: disable-msg=W0231
__import__('nova.auth.fakeldap')
self.ldap = sys.modules['nova.auth.fakeldap']

View File

@ -23,7 +23,7 @@ Nova authentication management
import logging
import os
import shutil
import string # pylint: disable-msg=W0402
import string # pylint: disable-msg=W0402
import tempfile
import uuid
import zipfile

View File

@ -49,7 +49,7 @@ class CloudPipe(object):
self.manager = manager.AuthManager()
def launch_vpn_instance(self, project_id):
logging.debug( "Launching VPN for %s" % (project_id))
logging.debug("Launching VPN for %s" % (project_id))
project = self.manager.get_project(project_id)
# Make a payload.zip
tmpfolder = tempfile.mkdtemp()
@ -57,16 +57,18 @@ class CloudPipe(object):
zippath = os.path.join(tmpfolder, filename)
z = zipfile.ZipFile(zippath, "w", zipfile.ZIP_DEFLATED)
z.write(FLAGS.boot_script_template,'autorun.sh')
z.write(FLAGS.boot_script_template, 'autorun.sh')
z.close()
key_name = self.setup_key_pair(project.project_manager_id, project_id)
zippy = open(zippath, "r")
context = context.RequestContext(user=project.project_manager, project=project)
context = context.RequestContext(user=project.project_manager,
project=project)
reservation = self.controller.run_instances(context,
# run instances expects encoded userdata, it is decoded in the get_metadata_call
# autorun.sh also decodes the zip file, hence the double encoding
# Run instances expects encoded userdata, it is decoded in the
# get_metadata_call. autorun.sh also decodes the zip file, hence
# the double encoding.
user_data=zippy.read().encode("base64").encode("base64"),
max_count=1,
min_count=1,
@ -79,12 +81,14 @@ class CloudPipe(object):
def setup_key_pair(self, user_id, project_id):
key_name = '%s%s' % (project_id, FLAGS.vpn_key_suffix)
try:
private_key, fingerprint = self.manager.generate_key_pair(user_id, key_name)
private_key, fingerprint = self.manager.generate_key_pair(user_id,
key_name)
try:
key_dir = os.path.join(FLAGS.keys_path, user_id)
if not os.path.exists(key_dir):
os.makedirs(key_dir)
with open(os.path.join(key_dir, '%s.pem' % key_name),'w') as f:
file_name = os.path.join(key_dir, '%s.pem' % key_name)
with open(file_name, 'w') as f:
f.write(private_key)
except:
pass
@ -95,9 +99,13 @@ class CloudPipe(object):
# def setup_secgroups(self, username):
# conn = self.euca.connection_for(username)
# try:
# secgroup = conn.create_security_group("vpn-secgroup", "vpn-secgroup")
# secgroup.authorize(ip_protocol = "udp", from_port = "1194", to_port = "1194", cidr_ip = "0.0.0.0/0")
# secgroup.authorize(ip_protocol = "tcp", from_port = "80", to_port = "80", cidr_ip = "0.0.0.0/0")
# secgroup.authorize(ip_protocol = "tcp", from_port = "22", to_port = "22", cidr_ip = "0.0.0.0/0")
# secgroup = conn.create_security_group("vpn-secgroup",
# "vpn-secgroup")
# secgroup.authorize(ip_protocol = "udp", from_port = "1194",
# to_port = "1194", cidr_ip = "0.0.0.0/0")
# secgroup.authorize(ip_protocol = "tcp", from_port = "80",
# to_port = "80", cidr_ip = "0.0.0.0/0")
# secgroup.authorize(ip_protocol = "tcp", from_port = "22",
# to_port = "22", cidr_ip = "0.0.0.0/0")
# except:
# pass

View File

@ -72,12 +72,12 @@ def partition(infile, outfile, local_bytes=0, resize=True,
" by sector size: %d / %d", local_bytes, sector_size)
local_sectors = local_bytes / sector_size
mbr_last = 62 # a
primary_first = mbr_last + 1 # b
primary_last = primary_first + primary_sectors - 1 # c
local_first = primary_last + 1 # d
local_last = local_first + local_sectors - 1 # e
last_sector = local_last # e
mbr_last = 62 # a
primary_first = mbr_last + 1 # b
primary_last = primary_first + primary_sectors - 1 # c
local_first = primary_last + 1 # d
local_last = local_first + local_sectors - 1 # e
last_sector = local_last # e
# create an empty file
yield execute('dd if=/dev/zero of=%s count=1 seek=%d bs=%d'
@ -157,7 +157,7 @@ def inject_data(image, key=None, net=None, partition=None, execute=None):
@defer.inlineCallbacks
def _inject_key_into_fs(key, fs, execute=None):
sshdir = os.path.join(os.path.join(fs, 'root'), '.ssh')
yield execute('sudo mkdir -p %s' % sshdir) # existing dir doesn't matter
yield execute('sudo mkdir -p %s' % sshdir) # existing dir doesn't matter
yield execute('sudo chown root %s' % sshdir)
yield execute('sudo chmod 700 %s' % sshdir)
keyfile = os.path.join(sshdir, 'authorized_keys')
@ -169,4 +169,3 @@ def _inject_net_into_fs(net, fs, execute=None):
netfile = os.path.join(os.path.join(os.path.join(
fs, 'etc'), 'network'), 'interfaces')
yield execute('sudo tee %s' % netfile, net)

View File

@ -85,8 +85,7 @@ RRD_VALUES = {
'RRA:MAX:0.5:6:800',
'RRA:MAX:0.5:24:800',
'RRA:MAX:0.5:444:800',
]
}
]}
utcnow = datetime.datetime.utcnow
@ -97,15 +96,12 @@ def update_rrd(instance, name, data):
Updates the specified RRD file.
"""
filename = os.path.join(instance.get_rrd_path(), '%s.rrd' % name)
if not os.path.exists(filename):
init_rrd(instance, name)
timestamp = int(time.mktime(utcnow().timetuple()))
rrdtool.update (
filename,
'%d:%s' % (timestamp, data)
)
rrdtool.update(filename, '%d:%s' % (timestamp, data))
def init_rrd(instance, name):
@ -113,29 +109,28 @@ def init_rrd(instance, name):
Initializes the specified RRD file.
"""
path = os.path.join(FLAGS.monitoring_rrd_path, instance.instance_id)
if not os.path.exists(path):
os.makedirs(path)
filename = os.path.join(path, '%s.rrd' % name)
if not os.path.exists(filename):
rrdtool.create (
rrdtool.create(
filename,
'--step', '%d' % FLAGS.monitoring_instances_step,
'--start', '0',
*RRD_VALUES[name]
)
*RRD_VALUES[name])
def graph_cpu(instance, duration):
"""
Creates a graph of cpu usage for the specified instance and duration.
"""
path = instance.get_rrd_path()
filename = os.path.join(path, 'cpu-%s.png' % duration)
rrdtool.graph (
rrdtool.graph(
filename,
'--disable-rrdtool-tag',
'--imgformat', 'PNG',
@ -146,9 +141,8 @@ def graph_cpu(instance, duration):
'-l', '0',
'-u', '100',
'DEF:cpu=%s:cpu:AVERAGE' % os.path.join(path, 'cpu.rrd'),
'AREA:cpu#eacc00:% CPU',
)
'AREA:cpu#eacc00:% CPU',)
store_graph(instance.instance_id, filename)
@ -158,8 +152,8 @@ def graph_net(instance, duration):
"""
path = instance.get_rrd_path()
filename = os.path.join(path, 'net-%s.png' % duration)
rrdtool.graph (
rrdtool.graph(
filename,
'--disable-rrdtool-tag',
'--imgformat', 'PNG',
@ -174,20 +168,19 @@ def graph_net(instance, duration):
'DEF:rx=%s:rx:AVERAGE' % os.path.join(path, 'net.rrd'),
'DEF:tx=%s:tx:AVERAGE' % os.path.join(path, 'net.rrd'),
'AREA:rx#00FF00:In traffic',
'LINE1:tx#0000FF:Out traffic',
)
'LINE1:tx#0000FF:Out traffic',)
store_graph(instance.instance_id, filename)
def graph_disk(instance, duration):
"""
Creates a graph of disk usage for the specified duration.
"""
"""
path = instance.get_rrd_path()
filename = os.path.join(path, 'disk-%s.png' % duration)
rrdtool.graph (
rrdtool.graph(
filename,
'--disable-rrdtool-tag',
'--imgformat', 'PNG',
@ -202,9 +195,8 @@ def graph_disk(instance, duration):
'DEF:rd=%s:rd:AVERAGE' % os.path.join(path, 'disk.rrd'),
'DEF:wr=%s:wr:AVERAGE' % os.path.join(path, 'disk.rrd'),
'AREA:rd#00FF00:Read',
'LINE1:wr#0000FF:Write',
)
'LINE1:wr#0000FF:Write',)
store_graph(instance.instance_id, filename)
@ -224,17 +216,16 @@ def store_graph(instance_id, filename):
is_secure=False,
calling_format=boto.s3.connection.OrdinaryCallingFormat(),
port=FLAGS.s3_port,
host=FLAGS.s3_host
)
host=FLAGS.s3_host)
bucket_name = '_%s.monitor' % instance_id
# Object store isn't creating the bucket like it should currently
# when it is first requested, so have to catch and create manually.
try:
bucket = s3.get_bucket(bucket_name)
except Exception:
bucket = s3.create_bucket(bucket_name)
key = boto.s3.Key(bucket)
key.key = os.path.basename(filename)
key.set_contents_from_filename(filename)
@ -247,18 +238,18 @@ class Instance(object):
self.last_updated = datetime.datetime.min
self.cputime = 0
self.cputime_last_updated = None
init_rrd(self, 'cpu')
init_rrd(self, 'net')
init_rrd(self, 'disk')
def needs_update(self):
"""
Indicates whether this instance is due to have its statistics updated.
"""
delta = utcnow() - self.last_updated
return delta.seconds >= FLAGS.monitoring_instances_step
def update(self):
"""
Updates the instances statistics and stores the resulting graphs
@ -271,7 +262,7 @@ class Instance(object):
if data != None:
logging.debug('CPU: %s', data)
update_rrd(self, 'cpu', data)
data = self.fetch_net_stats()
logging.debug('NET: %s', data)
update_rrd(self, 'net', data)
@ -279,7 +270,7 @@ class Instance(object):
data = self.fetch_disk_stats()
logging.debug('DISK: %s', data)
update_rrd(self, 'disk', data)
# TODO(devcamcar): Turn these into pool.ProcessPool.execute() calls
# and make the methods @defer.inlineCallbacks.
graph_cpu(self, '1d')
@ -297,13 +288,13 @@ class Instance(object):
logging.exception('unexpected error during update')
self.last_updated = utcnow()
def get_rrd_path(self):
"""
Returns the path to where RRD files are stored.
"""
return os.path.join(FLAGS.monitoring_rrd_path, self.instance_id)
def fetch_cpu_stats(self):
"""
Returns cpu usage statistics for this instance.
@ -327,17 +318,17 @@ class Instance(object):
# Calculate the number of seconds between samples.
d = self.cputime_last_updated - cputime_last_updated
t = d.days * 86400 + d.seconds
logging.debug('t = %d', t)
# Calculate change over time in number of nanoseconds of CPU time used.
cputime_delta = self.cputime - cputime_last
logging.debug('cputime_delta = %s', cputime_delta)
# Get the number of virtual cpus in this domain.
vcpus = int(info['num_cpu'])
logging.debug('vcpus = %d', vcpus)
# Calculate CPU % used and cap at 100.
@ -349,9 +340,9 @@ class Instance(object):
"""
rd = 0
wr = 0
disks = self.conn.get_disks(self.instance_id)
# Aggregate the read and write totals.
for disk in disks:
try:
@ -363,7 +354,7 @@ class Instance(object):
logging.error('Cannot get blockstats for "%s" on "%s"',
disk, self.instance_id)
raise
return '%d:%d' % (rd, wr)
def fetch_net_stats(self):
@ -372,9 +363,9 @@ class Instance(object):
"""
rx = 0
tx = 0
interfaces = self.conn.get_interfaces(self.instance_id)
# Aggregate the in and out totals.
for interface in interfaces:
try:
@ -385,7 +376,7 @@ class Instance(object):
logging.error('Cannot get ifstats for "%s" on "%s"',
interface, self.instance_id)
raise
return '%d:%d' % (rx, tx)
@ -400,16 +391,16 @@ class InstanceMonitor(object, service.Service):
"""
self._instances = {}
self._loop = task.LoopingCall(self.updateInstances)
def startService(self):
self._instances = {}
self._loop.start(interval=FLAGS.monitoring_instances_delay)
service.Service.startService(self)
def stopService(self):
self._loop.stop()
service.Service.stopService(self)
def updateInstances(self):
"""
Update resource usage for all running instances.
@ -420,20 +411,20 @@ class InstanceMonitor(object, service.Service):
logging.exception('unexpected exception getting connection')
time.sleep(FLAGS.monitoring_instances_delay)
return
domain_ids = conn.list_instances()
try:
self.updateInstances_(conn, domain_ids)
self.updateInstances_(conn, domain_ids)
except Exception, exn:
logging.exception('updateInstances_')
logging.exception('updateInstances_')
def updateInstances_(self, conn, domain_ids):
for domain_id in domain_ids:
if not domain_id in self._instances:
if not domain_id in self._instances:
instance = Instance(conn, domain_id)
self._instances[domain_id] = instance
logging.debug('Found instance: %s', domain_id)
for key in self._instances.keys():
instance = self._instances[key]
if instance.needs_update():

View File

@ -30,12 +30,11 @@ CRASHED = 0x06
def name(code):
d = {
NOSTATE : 'pending',
RUNNING : 'running',
BLOCKED : 'blocked',
PAUSED : 'paused',
NOSTATE: 'pending',
RUNNING: 'running',
BLOCKED: 'blocked',
PAUSED: 'paused',
SHUTDOWN: 'shutdown',
SHUTOFF : 'shutdown',
CRASHED : 'crashed',
}
SHUTOFF: 'shutdown',
CRASHED: 'crashed'}
return d[code]

View File

@ -30,7 +30,8 @@ flags.DEFINE_string('glance_teller_address', 'http://127.0.0.1',
flags.DEFINE_string('glance_teller_port', '9191',
'Port for Glance\'s Teller service')
flags.DEFINE_string('glance_parallax_address', 'http://127.0.0.1',
'IP address or URL where Glance\'s Parallax service resides')
'IP address or URL where Glance\'s Parallax service '
'resides')
flags.DEFINE_string('glance_parallax_port', '9292',
'Port for Glance\'s Parallax service')
@ -120,10 +121,10 @@ class BaseImageService(object):
def delete(self, image_id):
"""
Delete the given image.
Delete the given image.
:raises NotFound if the image does not exist.
"""
raise NotImplementedError
@ -131,14 +132,14 @@ class BaseImageService(object):
class LocalImageService(BaseImageService):
"""Image service storing images to local disk.
It assumes that image_ids are integers."""
def __init__(self):
self._path = "/tmp/nova/images"
try:
os.makedirs(self._path)
except OSError: # exists
except OSError: # Exists
pass
def _path_to(self, image_id):
@ -156,7 +157,7 @@ class LocalImageService(BaseImageService):
def show(self, id):
try:
return pickle.load(open(self._path_to(id)))
return pickle.load(open(self._path_to(id)))
except IOError:
raise exception.NotFound
@ -164,7 +165,7 @@ class LocalImageService(BaseImageService):
"""
Store the image data and return the new image id.
"""
id = random.randint(0, 2**32-1)
id = random.randint(0, 2 ** 32 - 1)
data['id'] = id
self.update(id, data)
return id

View File

@ -30,6 +30,7 @@ import nova.image.service
FLAGS = flags.FLAGS
class TellerClient(object):
def __init__(self):
@ -153,7 +154,6 @@ class ParallaxClient(object):
class GlanceImageService(nova.image.service.BaseImageService):
"""Provides storage and retrieval of disk image objects within Glance."""
def __init__(self):
@ -202,10 +202,10 @@ class GlanceImageService(nova.image.service.BaseImageService):
def delete(self, image_id):
"""
Delete the given image.
Delete the given image.
:raises NotFound if the image does not exist.
"""
self.parallax.delete_image_metadata(image_id)

View File

@ -53,6 +53,7 @@ flags.DEFINE_bool('use_nova_chains', False,
DEFAULT_PORTS = [("tcp", 80), ("tcp", 22), ("udp", 1194), ("tcp", 443)]
def init_host():
"""Basic networking setup goes here"""
# NOTE(devcamcar): Cloud public DNAT entries, CloudPipe port
@ -72,6 +73,7 @@ def init_host():
_confirm_rule("POSTROUTING", "-t nat -s %(range)s -d %(range)s -j ACCEPT" %
{'range': FLAGS.fixed_range})
def bind_floating_ip(floating_ip):
"""Bind ip to public interface"""
_execute("sudo ip addr add %s dev %s" % (floating_ip,
@ -103,7 +105,7 @@ def ensure_floating_forward(floating_ip, fixed_ip):
_confirm_rule("FORWARD", "-d %s -p icmp -j ACCEPT"
% (fixed_ip))
for (protocol, port) in DEFAULT_PORTS:
_confirm_rule("FORWARD","-d %s -p %s --dport %s -j ACCEPT"
_confirm_rule("FORWARD", "-d %s -p %s --dport %s -j ACCEPT"
% (fixed_ip, protocol, port))
@ -189,7 +191,8 @@ def update_dhcp(context, network_id):
# if dnsmasq is already running, then tell it to reload
if pid:
out, _err = _execute('cat /proc/%d/cmdline' % pid, check_exit_code=False)
out, _err = _execute('cat /proc/%d/cmdline' % pid,
check_exit_code=False)
if conffile in out:
try:
_execute('sudo kill -HUP %d' % pid)
@ -233,7 +236,8 @@ def _confirm_rule(chain, cmd):
"""Delete and re-add iptables rule"""
if FLAGS.use_nova_chains:
chain = "nova_%s" % chain.lower()
_execute("sudo iptables --delete %s %s" % (chain, cmd), check_exit_code=False)
_execute("sudo iptables --delete %s %s" % (chain, cmd),
check_exit_code=False)
_execute("sudo iptables -I %s %s" % (chain, cmd))

View File

@ -49,7 +49,8 @@ flags.DEFINE_string('vpn_ip', utils.get_my_ip(),
flags.DEFINE_integer('vpn_start', 1000, 'First Vpn port for private networks')
flags.DEFINE_integer('network_size', 256,
'Number of addresses in each private subnet')
flags.DEFINE_string('floating_range', '4.4.4.0/24', 'Floating IP address block')
flags.DEFINE_string('floating_range', '4.4.4.0/24',
'Floating IP address block')
flags.DEFINE_string('fixed_range', '10.0.0.0/8', 'Fixed IP address block')
flags.DEFINE_integer('cnt_vpn_clients', 5,
'Number of addresses reserved for vpn clients')
@ -287,7 +288,6 @@ class FlatManager(NetworkManager):
self.db.network_update(context, network_id, net)
class FlatDHCPManager(NetworkManager):
"""Flat networking with dhcp"""
@ -432,4 +432,3 @@ class VlanManager(NetworkManager):
"""Number of reserved ips at the top of the range"""
parent_reserved = super(VlanManager, self)._top_reserved_ips
return parent_reserved + FLAGS.cnt_vpn_clients

View File

@ -69,7 +69,8 @@ class Bucket(object):
"""Create a new bucket owned by a project.
@bucket_name: a string representing the name of the bucket to create
@context: a nova.auth.api.ApiContext object representing who owns the bucket.
@context: a nova.auth.api.ApiContext object representing who owns the
bucket.
Raises:
NotAuthorized: if the bucket is already exists or has invalid name
@ -77,12 +78,12 @@ class Bucket(object):
path = os.path.abspath(os.path.join(
FLAGS.buckets_path, bucket_name))
if not path.startswith(os.path.abspath(FLAGS.buckets_path)) or \
os.path.exists(path):
raise exception.NotAuthorized()
os.path.exists(path):
raise exception.NotAuthorized()
os.makedirs(path)
with open(path+'.json', 'w') as f:
with open(path + '.json', 'w') as f:
json.dump({'ownerId': context.project_id}, f)
@property
@ -99,22 +100,25 @@ class Bucket(object):
@property
def owner_id(self):
try:
with open(self.path+'.json') as f:
with open(self.path + '.json') as f:
return json.load(f)['ownerId']
except:
return None
def is_authorized(self, context):
try:
return context.user.is_admin() or self.owner_id == context.project_id
return context.user.is_admin() or \
self.owner_id == context.project_id
except Exception, e:
return False
def list_keys(self, prefix='', marker=None, max_keys=1000, terse=False):
object_names = []
path_length = len(self.path)
for root, dirs, files in os.walk(self.path):
for file_name in files:
object_names.append(os.path.join(root, file_name)[len(self.path)+1:])
object_name = os.path.join(root, file_name)[path_length + 1:]
object_names.append(object_name)
object_names.sort()
contents = []
@ -164,7 +168,7 @@ class Bucket(object):
if len(os.listdir(self.path)) > 0:
raise exception.NotEmpty()
os.rmdir(self.path)
os.remove(self.path+'.json')
os.remove(self.path + '.json')
def __getitem__(self, key):
return stored.Object(self, key)

View File

@ -136,6 +136,7 @@ def get_context(request):
logging.debug("Authentication Failure: %s", ex)
raise exception.NotAuthorized()
class ErrorHandlingResource(resource.Resource):
"""Maps exceptions to 404 / 401 codes. Won't work for
exceptions thrown after NOT_DONE_YET is returned.
@ -162,7 +163,7 @@ class S3(ErrorHandlingResource):
def __init__(self):
ErrorHandlingResource.__init__(self)
def getChild(self, name, request): # pylint: disable-msg=C0103
def getChild(self, name, request): # pylint: disable-msg=C0103
"""Returns either the image or bucket resource"""
request.context = get_context(request)
if name == '':
@ -172,7 +173,7 @@ class S3(ErrorHandlingResource):
else:
return BucketResource(name)
def render_GET(self, request): # pylint: disable-msg=R0201
def render_GET(self, request): # pylint: disable-msg=R0201
"""Renders the GET request for a list of buckets as XML"""
logging.debug('List of buckets requested')
buckets = [b for b in bucket.Bucket.all() \
@ -321,11 +322,13 @@ class ImageResource(ErrorHandlingResource):
if not self.img.is_authorized(request.context, True):
raise exception.NotAuthorized()
return static.File(self.img.image_path,
defaultType='application/octet-stream'
).render_GET(request)
defaultType='application/octet-stream').\
render_GET(request)
class ImagesResource(resource.Resource):
"""A web resource representing a list of images"""
def getChild(self, name, _request):
"""Returns itself or an ImageResource if no name given"""
if name == '':
@ -333,7 +336,7 @@ class ImagesResource(resource.Resource):
else:
return ImageResource(name)
def render_GET(self, request): # pylint: disable-msg=R0201
def render_GET(self, request): # pylint: disable-msg=R0201
""" returns a json listing of all images
that a user has permissions to see """
@ -362,7 +365,7 @@ class ImagesResource(resource.Resource):
request.finish()
return server.NOT_DONE_YET
def render_PUT(self, request): # pylint: disable-msg=R0201
def render_PUT(self, request): # pylint: disable-msg=R0201
""" create a new registered image """
image_id = get_argument(request, 'image_id', u'')
@ -383,7 +386,7 @@ class ImagesResource(resource.Resource):
p.start()
return ''
def render_POST(self, request): # pylint: disable-msg=R0201
def render_POST(self, request): # pylint: disable-msg=R0201
"""Update image attributes: public/private"""
# image_id required for all requests
@ -397,7 +400,7 @@ class ImagesResource(resource.Resource):
if operation:
# operation implies publicity toggle
logging.debug("handling publicity toggle")
image_object.set_public(operation=='add')
image_object.set_public(operation == 'add')
else:
# other attributes imply update
logging.debug("update user fields")
@ -407,7 +410,7 @@ class ImagesResource(resource.Resource):
image_object.update_user_editable_fields(clean_args)
return ''
def render_DELETE(self, request): # pylint: disable-msg=R0201
def render_DELETE(self, request): # pylint: disable-msg=R0201
"""Delete a registered image"""
image_id = get_argument(request, "image_id", u"")
image_object = image.Image(image_id)

View File

@ -48,8 +48,8 @@ class Image(object):
self.image_id = image_id
self.path = os.path.abspath(os.path.join(FLAGS.images_path, image_id))
if not self.path.startswith(os.path.abspath(FLAGS.images_path)) or \
not os.path.isdir(self.path):
raise exception.NotFound
not os.path.isdir(self.path):
raise exception.NotFound
@property
def image_path(self):
@ -127,8 +127,8 @@ class Image(object):
a string of the image id for the kernel
@type ramdisk: bool or str
@param ramdisk: either TRUE meaning this partition is a ramdisk image or
a string of the image id for the ramdisk
@param ramdisk: either TRUE meaning this partition is a ramdisk image
or a string of the image id for the ramdisk
@type public: bool
@ -160,8 +160,7 @@ class Image(object):
'isPublic': public,
'architecture': 'x86_64',
'imageType': image_type,
'state': 'available'
}
'state': 'available'}
if type(kernel) is str and len(kernel) > 0:
info['kernelId'] = kernel
@ -180,7 +179,7 @@ class Image(object):
os.makedirs(image_path)
bucket_name = image_location.split("/")[0]
manifest_path = image_location[len(bucket_name)+1:]
manifest_path = image_location[len(bucket_name) + 1:]
bucket_object = bucket.Bucket(bucket_name)
manifest = ElementTree.fromstring(bucket_object[manifest_path].read())
@ -204,10 +203,9 @@ class Image(object):
'imageId': image_id,
'imageLocation': image_location,
'imageOwnerId': context.project_id,
'isPublic': False, # FIXME: grab public from manifest
'architecture': 'x86_64', # FIXME: grab architecture from manifest
'imageType' : image_type
}
'isPublic': False, # FIXME: grab public from manifest
'architecture': 'x86_64', # FIXME: grab architecture from manifest
'imageType': image_type}
if kernel_id:
info['kernelId'] = kernel_id
@ -230,24 +228,29 @@ class Image(object):
write_state('decrypting')
# FIXME: grab kernelId and ramdiskId from bundle manifest
encrypted_key = binascii.a2b_hex(manifest.find("image/ec2_encrypted_key").text)
encrypted_iv = binascii.a2b_hex(manifest.find("image/ec2_encrypted_iv").text)
hex_key = manifest.find("image/ec2_encrypted_key").text
encrypted_key = binascii.a2b_hex(hex_key)
hex_iv = manifest.find("image/ec2_encrypted_iv").text
encrypted_iv = binascii.a2b_hex(hex_iv)
cloud_private_key = os.path.join(FLAGS.ca_path, "private/cakey.pem")
decrypted_filename = os.path.join(image_path, 'image.tar.gz')
Image.decrypt_image(encrypted_filename, encrypted_key, encrypted_iv, cloud_private_key, decrypted_filename)
Image.decrypt_image(encrypted_filename, encrypted_key, encrypted_iv,
cloud_private_key, decrypted_filename)
write_state('untarring')
image_file = Image.untarzip_image(image_path, decrypted_filename)
shutil.move(os.path.join(image_path, image_file), os.path.join(image_path, 'image'))
shutil.move(os.path.join(image_path, image_file),
os.path.join(image_path, 'image'))
write_state('available')
os.unlink(decrypted_filename)
os.unlink(encrypted_filename)
@staticmethod
def decrypt_image(encrypted_filename, encrypted_key, encrypted_iv, cloud_private_key, decrypted_filename):
def decrypt_image(encrypted_filename, encrypted_key, encrypted_iv,
cloud_private_key, decrypted_filename):
key, err = utils.execute(
'openssl rsautl -decrypt -inkey %s' % cloud_private_key,
process_input=encrypted_key,
@ -259,13 +262,15 @@ class Image(object):
process_input=encrypted_iv,
check_exit_code=False)
if err:
raise exception.Error("Failed to decrypt initialization vector: %s" % err)
raise exception.Error("Failed to decrypt initialization "
"vector: %s" % err)
_out, err = utils.execute(
'openssl enc -d -aes-128-cbc -in %s -K %s -iv %s -out %s'
% (encrypted_filename, key, iv, decrypted_filename),
check_exit_code=False)
if err:
raise exception.Error("Failed to decrypt image file %s : %s" % (encrypted_filename, err))
raise exception.Error("Failed to decrypt image file %s : %s" %
(encrypted_filename, err))
@staticmethod
def untarzip_image(path, filename):

View File

@ -50,8 +50,8 @@ class Object(object):
return os.path.getmtime(self.path)
def read(self):
""" read all contents of key into memory and return """
return self.file.read()
""" read all contents of key into memory and return """
return self.file.read()
@property
def file(self):

View File

@ -31,10 +31,12 @@ FLAGS = flags.FLAGS
flags.DEFINE_integer('service_down_time', 60,
'maximum time since last checkin for up service')
class NoValidHost(exception.Error):
"""There is no valid host for the command."""
pass
class Scheduler(object):
"""The base class that all Scheduler clases should inherit from."""

View File

@ -56,7 +56,8 @@ class SchedulerManager(manager.Manager):
driver_method = 'schedule_%s' % method
elevated = context.elevated()
try:
host = getattr(self.driver, driver_method)(elevated, *args, **kwargs)
host = getattr(self.driver, driver_method)(elevated, *args,
**kwargs)
except AttributeError:
host = self.driver.schedule(elevated, topic, *args, **kwargs)

View File

@ -36,6 +36,7 @@ flags.DEFINE_integer("max_gigabytes", 10000,
flags.DEFINE_integer("max_networks", 1000,
"maximum number of networks to allow per host")
class SimpleScheduler(chance.ChanceScheduler):
"""Implements Naive Scheduler that tries to find least loaded host."""

View File

@ -226,6 +226,7 @@ class FakeConnection(object):
def get_console_output(self, instance):
return 'FAKE CONSOLE OUTPUT'
class FakeInstance(object):
def __init__(self):
self._state = power_state.NOSTATE

View File

@ -62,8 +62,8 @@ def _fetch_s3_image(image, path, user, project):
headers['Authorization'] = 'AWS %s:%s' % (access, signature)
cmd = ['/usr/bin/curl', '--fail', '--silent', url]
for (k,v) in headers.iteritems():
cmd += ['-H', '%s: %s' % (k,v)]
for (k, v) in headers.iteritems():
cmd += ['-H', '%s: %s' % (k, v)]
cmd += ['-o', path]
return process.SharedPool().execute(executable=cmd[0], args=cmd[1:])

View File

@ -62,7 +62,8 @@ flags.DEFINE_string('injected_network_template',
'Template file for injected network')
flags.DEFINE_string('libvirt_type',
'kvm',
'Libvirt domain type (valid options are: kvm, qemu, uml, xen)')
'Libvirt domain type (valid options are: '
'kvm, qemu, uml, xen)')
flags.DEFINE_string('libvirt_uri',
'',
'Override the default libvirt URI (which is dependent'
@ -96,7 +97,8 @@ class LibvirtConnection(object):
def _conn(self):
if not self._wrapped_conn or not self._test_connection():
logging.debug('Connecting to libvirt: %s' % self.libvirt_uri)
self._wrapped_conn = self._connect(self.libvirt_uri, self.read_only)
self._wrapped_conn = self._connect(self.libvirt_uri,
self.read_only)
return self._wrapped_conn
def _test_connection(self):
@ -150,6 +152,7 @@ class LibvirtConnection(object):
# WE'LL save this for when we do shutdown,
# instead of destroy - but destroy returns immediately
timer = task.LoopingCall(f=None)
def _wait_for_shutdown():
try:
state = self.get_info(instance['name'])['state']
@ -164,6 +167,7 @@ class LibvirtConnection(object):
power_state.SHUTDOWN)
timer.stop()
d.callback(None)
timer.f = _wait_for_shutdown
timer.start(interval=0.5, now=True)
return d
@ -201,6 +205,7 @@ class LibvirtConnection(object):
d = defer.Deferred()
timer = task.LoopingCall(f=None)
def _wait_for_reboot():
try:
state = self.get_info(instance['name'])['state']
@ -217,6 +222,7 @@ class LibvirtConnection(object):
power_state.SHUTDOWN)
timer.stop()
d.callback(None)
timer.f = _wait_for_reboot
timer.start(interval=0.5, now=True)
yield d
@ -229,7 +235,8 @@ class LibvirtConnection(object):
instance['id'],
power_state.NOSTATE,
'launching')
yield NWFilterFirewall(self._conn).setup_nwfilters_for_instance(instance)
yield NWFilterFirewall(self._conn).\
setup_nwfilters_for_instance(instance)
yield self._create_image(instance, xml)
yield self._conn.createXML(xml, 0)
# TODO(termie): this should actually register
@ -238,6 +245,7 @@ class LibvirtConnection(object):
local_d = defer.Deferred()
timer = task.LoopingCall(f=None)
def _wait_for_boot():
try:
state = self.get_info(instance['name'])['state']
@ -265,8 +273,9 @@ class LibvirtConnection(object):
if virsh_output.startswith('/dev/'):
logging.info('cool, it\'s a device')
d = process.simple_execute("sudo dd if=%s iflag=nonblock" % virsh_output, check_exit_code=False)
d.addCallback(lambda r:r[0])
d = process.simple_execute("sudo dd if=%s iflag=nonblock" %
virsh_output, check_exit_code=False)
d.addCallback(lambda r: r[0])
return d
else:
return ''
@ -285,11 +294,15 @@ class LibvirtConnection(object):
@exception.wrap_exception
def get_console_output(self, instance):
console_log = os.path.join(FLAGS.instances_path, instance['name'], 'console.log')
d = process.simple_execute('sudo chown %d %s' % (os.getuid(), console_log))
console_log = os.path.join(FLAGS.instances_path, instance['name'],
'console.log')
d = process.simple_execute('sudo chown %d %s' % (os.getuid(),
console_log))
if FLAGS.libvirt_type == 'xen':
# Xen is spethial
d.addCallback(lambda _: process.simple_execute("virsh ttyconsole %s" % instance['name']))
d.addCallback(lambda _:
process.simple_execute("virsh ttyconsole %s" %
instance['name']))
d.addCallback(self._flush_xen_console)
d.addCallback(self._append_to_file, console_log)
else:
@ -297,7 +310,6 @@ class LibvirtConnection(object):
d.addCallback(self._dump_file)
return d
@defer.inlineCallbacks
def _create_image(self, inst, libvirt_xml):
# syntactic nicety
@ -309,7 +321,6 @@ class LibvirtConnection(object):
yield process.simple_execute('mkdir -p %s' % basepath())
yield process.simple_execute('chmod 0777 %s' % basepath())
# TODO(termie): these are blocking calls, it would be great
# if they weren't.
logging.info('instance %s: Creating image', inst['name'])
@ -317,17 +328,21 @@ class LibvirtConnection(object):
f.write(libvirt_xml)
f.close()
os.close(os.open(basepath('console.log'), os.O_CREAT | os.O_WRONLY, 0660))
os.close(os.open(basepath('console.log'), os.O_CREAT | os.O_WRONLY,
0660))
user = manager.AuthManager().get_user(inst['user_id'])
project = manager.AuthManager().get_project(inst['project_id'])
if not os.path.exists(basepath('disk')):
yield images.fetch(inst.image_id, basepath('disk-raw'), user, project)
yield images.fetch(inst.image_id, basepath('disk-raw'), user,
project)
if not os.path.exists(basepath('kernel')):
yield images.fetch(inst.kernel_id, basepath('kernel'), user, project)
yield images.fetch(inst.kernel_id, basepath('kernel'), user,
project)
if not os.path.exists(basepath('ramdisk')):
yield images.fetch(inst.ramdisk_id, basepath('ramdisk'), user, project)
yield images.fetch(inst.ramdisk_id, basepath('ramdisk'), user,
project)
execute = lambda cmd, process_input=None, check_exit_code=True: \
process.simple_execute(cmd=cmd,
@ -339,8 +354,8 @@ class LibvirtConnection(object):
network_ref = db.network_get_by_instance(context.get_admin_context(),
inst['id'])
if network_ref['injected']:
address = db.instance_get_fixed_address(context.get_admin_context(),
inst['id'])
admin_context = context.get_admin_context()
address = db.instance_get_fixed_address(admin_context, inst['id'])
with open(FLAGS.injected_network_template) as f:
net = f.read() % {'address': address,
'netmask': network_ref['netmask'],
@ -354,7 +369,8 @@ class LibvirtConnection(object):
if net:
logging.info('instance %s: injecting net into image %s',
inst['name'], inst.image_id)
yield disk.inject_data(basepath('disk-raw'), key, net, execute=execute)
yield disk.inject_data(basepath('disk-raw'), key, net,
execute=execute)
if os.path.exists(basepath('disk')):
yield process.simple_execute('rm -f %s' % basepath('disk'))
@ -377,7 +393,8 @@ class LibvirtConnection(object):
network = db.project_get_network(context.get_admin_context(),
instance['project_id'])
# FIXME(vish): stick this in db
instance_type = instance_types.INSTANCE_TYPES[instance['instance_type']]
instance_type = instance['instance_type']
instance_type = instance_types.INSTANCE_TYPES[instance_type]
ip_address = db.instance_get_fixed_address(context.get_admin_context(),
instance['id'])
# Assume that the gateway also acts as the dhcp server.
@ -391,7 +408,7 @@ class LibvirtConnection(object):
'bridge_name': network['bridge'],
'mac_address': instance['mac_address'],
'ip_address': ip_address,
'dhcp_server': dhcp_server }
'dhcp_server': dhcp_server}
libvirt_xml = self.libvirt_xml % xml_info
logging.debug('instance %s: finished toXML method', instance['name'])
@ -506,7 +523,6 @@ class LibvirtConnection(object):
domain = self._conn.lookupByName(instance_name)
return domain.interfaceStats(interface)
def refresh_security_group(self, security_group_id):
fw = NWFilterFirewall(self._conn)
fw.ensure_security_group_filter(security_group_id)
@ -557,7 +573,6 @@ class NWFilterFirewall(object):
def __init__(self, get_connection):
self._conn = get_connection
nova_base_filter = '''<filter name='nova-base' chain='root'>
<uuid>26717364-50cf-42d1-8185-29bf893ab110</uuid>
<filterref filter='no-mac-spoofing'/>
@ -578,7 +593,8 @@ class NWFilterFirewall(object):
srcportstart='68'
dstportstart='67'/>
</rule>
<rule action='accept' direction='in' priority='100'>
<rule action='accept' direction='in'
priority='100'>
<udp srcipaddr='$DHCPSERVER'
srcportstart='67'
dstportstart='68'/>
@ -588,8 +604,8 @@ class NWFilterFirewall(object):
def nova_base_ipv4_filter(self):
retval = "<filter name='nova-base-ipv4' chain='ipv4'>"
for protocol in ['tcp', 'udp', 'icmp']:
for direction,action,priority in [('out','accept', 399),
('inout','drop', 400)]:
for direction, action, priority in [('out', 'accept', 399),
('inout', 'drop', 400)]:
retval += """<rule action='%s' direction='%s' priority='%d'>
<%s />
</rule>""" % (action, direction,
@ -597,12 +613,11 @@ class NWFilterFirewall(object):
retval += '</filter>'
return retval
def nova_base_ipv6_filter(self):
retval = "<filter name='nova-base-ipv6' chain='ipv6'>"
for protocol in ['tcp', 'udp', 'icmp']:
for direction,action,priority in [('out','accept',399),
('inout','drop',400)]:
for direction, action, priority in [('out', 'accept', 399),
('inout', 'drop', 400)]:
retval += """<rule action='%s' direction='%s' priority='%d'>
<%s-ipv6 />
</rule>""" % (action, direction,
@ -610,7 +625,6 @@ class NWFilterFirewall(object):
retval += '</filter>'
return retval
def nova_project_filter(self, project, net, mask):
retval = "<filter name='nova-project-%s' chain='ipv4'>" % project
for protocol in ['tcp', 'udp', 'icmp']:
@ -620,14 +634,12 @@ class NWFilterFirewall(object):
retval += '</filter>'
return retval
def _define_filter(self, xml):
if callable(xml):
xml = xml()
d = threads.deferToThread(self._conn.nwfilterDefineXML, xml)
return d
@staticmethod
def _get_net_and_mask(cidr):
net = IPy.IP(cidr)
@ -646,9 +658,9 @@ class NWFilterFirewall(object):
yield self._define_filter(self.nova_dhcp_filter)
yield self._define_filter(self.nova_base_filter)
nwfilter_xml = ("<filter name='nova-instance-%s' chain='root'>\n" +
" <filterref filter='nova-base' />\n"
) % instance['name']
nwfilter_xml = "<filter name='nova-instance-%s' chain='root'>\n" \
" <filterref filter='nova-base' />\n" % \
instance['name']
if FLAGS.allow_project_net_traffic:
network_ref = db.project_get_network(context.get_admin_context(),
@ -658,14 +670,14 @@ class NWFilterFirewall(object):
net, mask)
yield self._define_filter(project_filter)
nwfilter_xml += (" <filterref filter='nova-project-%s' />\n"
) % instance['project_id']
nwfilter_xml += " <filterref filter='nova-project-%s' />\n" % \
instance['project_id']
for security_group in instance.security_groups:
yield self.ensure_security_group_filter(security_group['id'])
nwfilter_xml += (" <filterref filter='nova-secgroup-%d' />\n"
) % security_group['id']
nwfilter_xml += " <filterref filter='nova-secgroup-%d' />\n" % \
security_group['id']
nwfilter_xml += "</filter>"
yield self._define_filter(nwfilter_xml)
@ -675,7 +687,6 @@ class NWFilterFirewall(object):
return self._define_filter(
self.security_group_to_nwfilter_xml(security_group_id))
def security_group_to_nwfilter_xml(self, security_group_id):
security_group = db.security_group_get(context.get_admin_context(),
security_group_id)
@ -684,12 +695,15 @@ class NWFilterFirewall(object):
rule_xml += "<rule action='accept' direction='in' priority='300'>"
if rule.cidr:
net, mask = self._get_net_and_mask(rule.cidr)
rule_xml += "<%s srcipaddr='%s' srcipmask='%s' " % (rule.protocol, net, mask)
rule_xml += "<%s srcipaddr='%s' srcipmask='%s' " % \
(rule.protocol, net, mask)
if rule.protocol in ['tcp', 'udp']:
rule_xml += "dstportstart='%s' dstportend='%s' " % \
(rule.from_port, rule.to_port)
elif rule.protocol == 'icmp':
logging.info('rule.protocol: %r, rule.from_port: %r, rule.to_port: %r' % (rule.protocol, rule.from_port, rule.to_port))
logging.info('rule.protocol: %r, rule.from_port: %r, '
'rule.to_port: %r' %
(rule.protocol, rule.from_port, rule.to_port))
if rule.from_port != -1:
rule_xml += "type='%s' " % rule.from_port
if rule.to_port != -1:
@ -697,5 +711,6 @@ class NWFilterFirewall(object):
rule_xml += '/>\n'
rule_xml += "</rule>\n"
xml = '''<filter name='nova-secgroup-%s' chain='ipv4'>%s</filter>''' % (security_group_id, rule_xml,)
xml = "<filter name='nova-secgroup-%s' chain='ipv4'>%s</filter>" % \
(security_group_id, rule_xml,)
return xml

View File

@ -75,12 +75,11 @@ flags.DEFINE_float('xenapi_task_poll_interval',
XENAPI_POWER_STATE = {
'Halted' : power_state.SHUTDOWN,
'Running' : power_state.RUNNING,
'Paused' : power_state.PAUSED,
'Suspended': power_state.SHUTDOWN, # FIXME
'Crashed' : power_state.CRASHED
}
'Halted': power_state.SHUTDOWN,
'Running': power_state.RUNNING,
'Paused': power_state.PAUSED,
'Suspended': power_state.SHUTDOWN, # FIXME
'Crashed': power_state.CRASHED}
def get_connection(_):
@ -90,12 +89,15 @@ def get_connection(_):
# library when not using XenAPI.
global XenAPI
if XenAPI is None:
XenAPI = __import__('XenAPI')
XenAPI = __import__('XenAPI')
url = FLAGS.xenapi_connection_url
username = FLAGS.xenapi_connection_username
password = FLAGS.xenapi_connection_password
if not url or password is None:
raise Exception('Must specify xenapi_connection_url, xenapi_connection_username (optionally), and xenapi_connection_password to use connection_type=xenapi')
raise Exception('Must specify xenapi_connection_url, '
'xenapi_connection_username (optionally), and '
'xenapi_connection_password to use '
'connection_type=xenapi')
return XenAPIConnection(url, username, password)
@ -141,7 +143,7 @@ class XenAPIConnection(object):
def _create_vm(self, instance, kernel, ramdisk):
"""Create a VM record. Returns a Deferred that gives the new
VM reference."""
instance_type = instance_types.INSTANCE_TYPES[instance.instance_type]
mem = str(long(instance_type['memory_mb']) * 1024 * 1024)
vcpus = str(instance_type['vcpus'])
@ -183,7 +185,7 @@ class XenAPIConnection(object):
def _create_vbd(self, vm_ref, vdi_ref, userdevice, bootable):
"""Create a VBD record. Returns a Deferred that gives the new
VBD reference."""
vbd_rec = {}
vbd_rec['VM'] = vm_ref
vbd_rec['VDI'] = vdi_ref
@ -207,10 +209,10 @@ class XenAPIConnection(object):
def _create_vif(self, vm_ref, network_ref, mac_address):
"""Create a VIF record. Returns a Deferred that gives the new
VIF reference."""
vif_rec = {}
vif_rec['device'] = '0'
vif_rec['network']= network_ref
vif_rec['network'] = network_ref
vif_rec['VM'] = vm_ref
vif_rec['MAC'] = mac_address
vif_rec['MTU'] = '1500'
@ -303,7 +305,7 @@ class XenAPIConnection(object):
def _lookup_blocking(self, i):
vms = self._conn.xenapi.VM.get_by_name_label(i)
n = len(vms)
n = len(vms)
if n == 0:
return None
elif n > 1:

View File

@ -61,7 +61,6 @@ class AOEDriver(object):
"Try number %s", tries)
yield self._execute("sleep %s" % tries ** 2)
@defer.inlineCallbacks
def create_volume(self, volume_name, size):
"""Creates a logical volume"""