updates per review

termie 2010-12-14 16:05:39 -08:00
parent c835c44198
commit a2a8406b5d
16 changed files with 35 additions and 132 deletions

View File

@@ -36,6 +36,7 @@ from nova import flags
from nova import utils
from nova import wsgi
FLAGS = flags.FLAGS
flags.DEFINE_integer('osapi_port', 8774, 'OpenStack API port')
flags.DEFINE_string('osapi_host', '0.0.0.0', 'OpenStack API host')
@@ -43,14 +44,10 @@ flags.DEFINE_integer('ec2api_port', 8773, 'EC2 API port')
flags.DEFINE_string('ec2api_host', '0.0.0.0', 'EC2 API host')
def main():
if __name__ == '__main__':
utils.default_flagfile()
FLAGS(sys.argv)
server = wsgi.Server()
server.start(api.API('os'), FLAGS.osapi_port, host=FLAGS.osapi_host)
server.start(api.API('ec2'), FLAGS.ec2api_port, host=FLAGS.ec2api_host)
server.wait()
if __name__ == '__main__':
utils.default_flagfile()
main()
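
For reference, a minimal sketch of how the entrypoint reads after this refactor, assembled only from the lines visible in the hunk above (exactly which setup calls end up inside main() is an assumption; module-level imports of api, flags, utils and wsgi are as in the file):

def main():
    # Assumed split: flag parsing and server startup live in main(),
    # while default_flagfile() stays in the __main__ guard shown above.
    FLAGS(sys.argv)
    server = wsgi.Server()
    server.start(api.API('os'), FLAGS.osapi_port, host=FLAGS.osapi_host)
    server.start(api.API('ec2'), FLAGS.ec2api_port, host=FLAGS.ec2api_host)
    server.wait()


if __name__ == '__main__':
    utils.default_flagfile()
    main()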

View File

@@ -40,9 +40,6 @@ from nova import utils
from nova import wsgi
FLAGS = flags.FLAGS
flags.DEFINE_integer('api_port', 8773, 'API port')
FLAGS = flags.FLAGS
flags.DEFINE_integer('osapi_port', 8774, 'OpenStack API port')
flags.DEFINE_string('osapi_host', '0.0.0.0', 'OpenStack API host')

View File

@@ -17,7 +17,7 @@
# License for the specific language governing permissions and limitations
# under the License.
""" Starter script for Nova Scheduler."""
"""Starter script for Nova Scheduler."""
import eventlet
eventlet.monkey_patch()

View File

@@ -203,7 +203,7 @@ class ComputeManager(manager.Manager):
volume_id, mountpoint)
instance_ref = self.db.instance_get(context, instance_id)
dev_path = self.volume_manager.setup_compute_volume(context,
volume_id)
volume_id)
try:
self.driver.attach_volume(instance_ref['name'],
dev_path,
@@ -238,7 +238,7 @@ class ComputeManager(manager.Manager):
instance_ref['name'])
else:
self.driver.detach_volume(instance_ref['name'],
volume_ref['mountpoint'])
volume_ref['mountpoint'])
self.volume_manager.remove_compute_volume(context, volume_id)
self.db.volume_detached(context, volume_id)
return True

View File

@@ -68,7 +68,7 @@ class Manager(base.Base):
def periodic_tasks(self, context=None):
"""Tasks to be run at a periodic interval"""
return
pass
def init_host(self):
"""Do any initialization that needs to be run if this is a standalone

View File

@@ -188,7 +188,6 @@ class AdapterConsumer(TopicConsumer):
node_func = getattr(self.proxy, str(method))
node_args = dict((str(k), v) for k, v in args.iteritems())
# NOTE(vish): magic is fun!
# pylint: disable-msg=W0142
try:
rval = node_func(context=ctxt, **node_args)
if msg_id:

View File

@@ -205,39 +205,6 @@ class Service(object):
logging.exception("model server went away")
def stop(pidfile):
"""
Stop the daemon
"""
# Get the pid from the pidfile
try:
pf = file(pidfile, 'r')
pid = int(pf.read().strip())
pf.close()
except IOError:
pid = None
if not pid:
message = "pidfile %s does not exist. Daemon not running?\n"
sys.stderr.write(message % pidfile)
# Not an error in a restart
return
# Try killing the daemon process
try:
while 1:
os.kill(pid, signal.SIGKILL)
time.sleep(0.1)
except OSError, err:
err = str(err)
if err.find("No such process") > 0:
if os.path.exists(pidfile):
os.remove(pidfile)
else:
print str(err)
sys.exit(1)
def serve(*services):
argv = FLAGS(sys.argv)
@@ -247,38 +214,7 @@ def serve(*services):
name = '_'.join(x.binary for x in services)
logging.debug("Serving %s" % name)
logging.getLogger('amqplib').setLevel(logging.DEBUG)
if not FLAGS.pidfile:
FLAGS.pidfile = '%s.pid' % name
# NOTE(vish): if we're running nodaemon, redirect the log to stdout
#if FLAGS.nodaemon and not FLAGS.logfile:
# FLAGS.logfile = "-"
#if not FLAGS.logfile:
# FLAGS.logfile = '%s.log' % name
#if not FLAGS.prefix:
# FLAGS.prefix = name
action = 'start'
if len(argv) > 1:
action = argv.pop()
if action == 'stop':
stop(FLAGS.pidfile)
sys.exit()
elif action == 'restart':
stop(FLAGS.pidfile)
elif action == 'start':
pass
else:
print 'usage: %s [options] [start|stop|restart]' % argv[0]
sys.exit(1)
#formatter = logging.Formatter(
# '(%(name)s): %(levelname)s %(message)s')
#handler = logging.StreamHandler()
#handler.setFormatter(formatter)
#logging.getLogger().addHandler(handler)
logging.getLogger('amqplib').setLevel(logging.WARN)
if FLAGS.verbose:
logging.getLogger().setLevel(logging.DEBUG)
@@ -292,9 +228,6 @@ def serve(*services):
for x in services:
x.start()
#while True:
# greenthread.sleep(5)
def wait():
while True:
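
With the daemon/pidfile handling above removed, serve() boils down to roughly the following. This is a sketch stitched together only from the surviving lines shown in this diff, so anything between the hunks is omitted:

def serve(*services):
    argv = FLAGS(sys.argv)
    name = '_'.join(x.binary for x in services)
    logging.debug("Serving %s" % name)
    logging.getLogger('amqplib').setLevel(logging.WARN)
    if FLAGS.verbose:
        logging.getLogger().setLevel(logging.DEBUG)
    # Services are started in the current (eventlet) process instead of
    # being daemonized.
    for x in services:
        x.start()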

View File

@@ -91,8 +91,6 @@ class ServiceTestCase(test.TestCase):
self.mox.StubOutWithMock(rpc,
'AdapterConsumer',
use_mock_anything=True)
#self.mox.StubOutWithMock(
# service.task, 'LoopingCall', use_mock_anything=True)
rpc.AdapterConsumer(connection=mox.IgnoreArg(),
topic=topic,
proxy=mox.IsA(service.Service)).AndReturn(
@@ -106,17 +104,6 @@
rpc.AdapterConsumer.attach_to_eventlet()
rpc.AdapterConsumer.attach_to_eventlet()
# Stub out looping call a bit needlessly since we don't have an easy
# way to cancel it (yet) when the tests finishes
#service.task.LoopingCall(mox.IgnoreArg()).AndReturn(
# service.task.LoopingCall)
#service.task.LoopingCall.start(interval=mox.IgnoreArg(),
# now=mox.IgnoreArg())
#service.task.LoopingCall(mox.IgnoreArg()).AndReturn(
# service.task.LoopingCall)
#service.task.LoopingCall.start(interval=mox.IgnoreArg(),
# now=mox.IgnoreArg())
service_create = {'host': host,
'binary': binary,
'topic': topic,

View File

@@ -241,7 +241,7 @@ class LoopingCall(object):
self.f(*self.args, **self.kw)
greenthread.sleep(interval)
except Exception:
logging.exception('hhmm')
logging.exception('in looping call')
done.send_exception(*sys.exc_info())
return
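
Context for the reworded log message: LoopingCall repeatedly invokes self.f on a greenthread timer and forwards any exception to the waiting event. A hedged usage sketch, with start()/stop() inferred from the XenAPI hunk further down and _beat as a made-up callback:

def _beat():
    # hypothetical periodic task
    logging.debug("still alive")

timer = LoopingCall(_beat)
timer.start(interval=10, now=True)
# ... later, when the periodic work is no longer needed:
timer.stop()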

View File

@@ -122,13 +122,13 @@ class FakeConnection(object):
"""
Rescue the specified instance.
"""
return
pass
def unrescue(self, instance):
"""
Unrescue the specified instance.
"""
return
pass
def destroy(self, instance):
"""

View File

@@ -215,7 +215,7 @@ class LibvirtConnection(object):
self._cleanup(instance)
done.send()
greenthread.spawn(_wait_for_time)
greenthread.spawn(_wait_for_timer)
return done
def _cleanup(self, instance):
@@ -365,9 +365,9 @@ class LibvirtConnection(object):
if virsh_output.startswith('/dev/'):
logging.info('cool, it\'s a device')
r = utils.execute("sudo dd if=%s iflag=nonblock" %
virsh_output, check_exit_code=False)
return r[0]
out, err = utils.execute("sudo dd if=%s iflag=nonblock" %
virsh_output, check_exit_code=False)
return out
else:
return ''
@@ -388,8 +388,7 @@ class LibvirtConnection(object):
console_log = os.path.join(FLAGS.instances_path, instance['name'],
'console.log')
utils.execute('sudo chown %d %s' % (os.getuid(),
console_log))
utils.execute('sudo chown %d %s' % (os.getuid(), console_log))
if FLAGS.libvirt_type == 'xen':
# Xen is special
@@ -476,7 +475,6 @@ class LibvirtConnection(object):
['local_gb']
* 1024 * 1024 * 1024)
resize = inst['instance_type'] != 'm1.tiny'
resize = True
if inst['instance_type'] == 'm1.tiny' or prefix == 'rescue-':
resize = False
@@ -743,7 +741,7 @@ class NWFilterFirewall(object):
if callable(xml):
xml = xml()
# execute in a native thread and block until done
# execute in a native thread and block current greenthread until done
tpool.execute(self._conn.nwfilterDefineXML, xml)
@staticmethod
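
The reworded comment is about eventlet semantics: nwfilterDefineXML is a blocking native libvirt call, so it is handed to eventlet's thread pool and only the calling greenthread waits for the result while the rest of the hub keeps scheduling. A minimal sketch of the pattern (blocking_call is a stand-in for any C-level blocking function):

from eventlet import tpool

# Runs blocking_call(arg) on a real OS thread; the current greenthread
# yields until the result is ready, other greenthreads are not blocked.
result = tpool.execute(blocking_call, arg)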

View File

@@ -30,10 +30,9 @@ class NetworkHelper():
@classmethod
def find_network_with_bridge(cls, session, bridge):
""" Return the network on which the bridge is attached, if found """
""" Return the network on which the bridge is attached, if found."""
expr = 'field "bridge" = "%s"' % bridge
networks = session.call_xenapi('network.get_all_records_where',
expr)
networks = session.call_xenapi('network.get_all_records_where', expr)
if len(networks) == 1:
return networks.keys()[0]
elif len(networks) > 1:

View File

@@ -107,8 +107,7 @@ class VMOps(object):
if vdis:
for vdi in vdis:
try:
task = self._session.call_xenapi('Async.VDI.destroy',
vdi)
task = self._session.call_xenapi('Async.VDI.destroy', vdi)
self._session.wait_for_task(task)
except XenAPI.Failure, exc:
logging.warn(exc)

View File

@@ -175,9 +175,11 @@ class XenAPISession(object):
The task is polled until it completes."""
done = event.Event()
loop = utis.LoopingTask(self._poll_task, task, done)
loop = utils.LoopingTask(self._poll_task, task, done)
loop.start(FLAGS.xenapi_task_poll_interval, now=True)
return done.wait()
rv = done.wait()
loop.stop()
return rv
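
The added loop.stop() matters because done.wait() only returns once the poller fires the event; without it the looping task would keep polling a task that has already completed. Annotated sketch of the handshake, using only the calls shown above (_poll_task, whose signature follows, is assumed to send() on the event when the task finishes):

done = event.Event()
loop = utils.LoopingTask(self._poll_task, task, done)
loop.start(FLAGS.xenapi_task_poll_interval, now=True)
rv = done.wait()   # blocks this greenthread until _poll_task calls done.send(...)
loop.stop()        # cancel the periodic polling now that the task is done
return rv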
def _poll_task(self, task, done):
"""Poll the given XenAPI task, and fire the given Deferred if we

View File

@@ -22,10 +22,10 @@ Drivers for volumes.
import logging
import os
import time
from nova import exception
from nova import flags
from nova import process
from nova import utils
@@ -75,7 +75,7 @@ class VolumeDriver(object):
raise
logging.exception("Recovering from a failed execute."
"Try number %s", tries)
self._execute("sleep %s" % tries ** 2)
time.sleep(tries ** 2)
def check_for_setup_error(self):
"""Returns an error if prerequisites aren't met"""
@@ -91,21 +91,20 @@ class VolumeDriver(object):
sizestr = '%sG' % volume['size']
self._try_execute("sudo lvcreate -L %s -n %s %s" %
(sizestr,
volume['name'],
FLAGS.volume_group))
volume['name'],
FLAGS.volume_group))
def delete_volume(self, volume):
"""Deletes a logical volume."""
self._try_execute("sudo lvremove -f %s/%s" %
(FLAGS.volume_group,
volume['name']))
volume['name']))
def local_path(self, volume):
# NOTE(vish): stops deprecation warning
escaped_group = FLAGS.volume_group.replace('-', '--')
escaped_name = volume['name'].replace('-', '--')
return "/dev/mapper/%s-%s" % (escaped_group,
escaped_name)
return "/dev/mapper/%s-%s" % (escaped_group, escaped_name)
def ensure_export(self, context, volume):
"""Synchronously recreates an export for a logical volume."""
@@ -165,7 +164,7 @@ class AOEDriver(VolumeDriver):
# still works for the other volumes, so we
# just wait a bit for the current volume to
# be ready and ignore any errors.
self._execute("sleep 2")
time.sleep(2)
self._execute("sudo vblade-persist auto all",
check_exit_code=False)
self._execute("sudo vblade-persist start all",
@@ -275,9 +274,8 @@ class ISCSIDriver(VolumeDriver):
def discover_volume(self, volume):
"""Discover volume on a remote host."""
(iscsi_name,
iscsi_portal) = self._get_name_and_portal(volume['name'],
volume['host'])
iscsi_name, iscsi_portal = self._get_name_and_portal(volume['name'],
volume['host'])
self._execute("sudo iscsiadm -m node -T %s -p %s --login" %
(iscsi_name, iscsi_portal))
self._execute("sudo iscsiadm -m node -T %s -p %s --op update "
@@ -287,9 +285,8 @@ class ISCSIDriver(VolumeDriver):
def undiscover_volume(self, volume):
"""Undiscover volume on a remote host."""
(iscsi_name,
iscsi_portal) = self._get_name_and_portal(volume['name'],
volume['host'])
iscsi_name, iscsi_portal = self._get_name_and_portal(volume['name'],
volume['host'])
self._execute("sudo iscsiadm -m node -T %s -p %s --op update "
"-n node.startup -v manual" %
(iscsi_name, iscsi_portal))

View File

@@ -46,7 +46,6 @@ import __main__
import os
import sys
from twisted.scripts import trial as trial_script
from nova import flags
@@ -66,8 +65,6 @@ from nova.tests.rpc_unittest import *
from nova.tests.scheduler_unittest import *
from nova.tests.service_unittest import *
from nova.tests.twistd_unittest import *
from nova.tests.validator_unittest import *
from nova.tests.virt_unittest import *
from nova.tests.virt_unittest import *
from nova.tests.volume_unittest import *
@@ -85,8 +82,6 @@ if __name__ == '__main__':
config = OptionsClass()
argv = config.parseOptions()
argv = FLAGS(sys.argv)
FLAGS.verbose = True
# TODO(termie): these should make a call instead of doing work on import