merged trunk

Vishvananda Ishaya 2010-07-26 11:02:28 -07:00
commit b1bc1e2edc
38 changed files with 171 additions and 147 deletions

View File

@ -17,7 +17,7 @@
# under the License.
# ARG is the id of the user
export SUBJ=/C=US/ST=California/L=Mountain View/O=Anso Labs/OU=Nova Dev/CN=customer-intCA-$3
export SUBJ="/C=US/ST=California/L=MountainView/O=AnsoLabs/OU=NovaDev/CN=customer-intCA-$1"
mkdir INTER/$1
cd INTER/$1
cp ../../openssl.cnf.tmpl openssl.cnf

README
View File

@ -6,15 +6,19 @@ The Choose Your Own Adventure README for Nova:
To monitor it from a distance: follow @novacc on twitter
To tame it for use in your own cloud: read http://docs.novacc.org/getting.started.html
To tame it for use in your own cloud: read http://nova.openstack.org/getting.started.html
To study its anatomy: read http://docs.novacc.org/architecture.html
To study its anatomy: read http://nova.openstack.org/architecture.html
To dissect it in detail: visit http://github.com/nova/cc
To dissect it in detail: visit http://code.launchpad.net/nova
To taunt it with its weaknesses: use http://github.com/nova/cc/issues
To taunt it with its weaknesses: use http://bugs.launchpad.net/nova
To watch it: http://hudson.openstack.org
To hack at it: read HACKING
To watch it: http://test.novacc.org/waterfall
To laugh at its PEP8 problems: http://hudson.openstack.org/job/nova-pep8/violations
To cry over its pylint problems: http://hudson.openstack.org/job/nova-pylint/violations

View File

@ -33,9 +33,6 @@ NOVA_PATH = os.path.join(os.path.dirname(os.path.dirname(__file__)), 'nova')
if os.path.exists(NOVA_PATH):
sys.path.insert(0, os.path.dirname(NOVA_PATH))
from carrot import connection
from carrot import messaging
from twisted.internet import task
from twisted.application import service
@ -43,6 +40,7 @@ from nova import flags
from nova import rpc
from nova import twistd
from nova.compute import node
from nova.objectstore import image # For the images_path flag
FLAGS = flags.FLAGS
@ -50,8 +48,8 @@ FLAGS = flags.FLAGS
# context when the twistd.serve() call is made below so any
# flags we define here will have to be conditionally defined,
# flags defined by imported modules are safe.
if 'node_report_state_interval' not in FLAGS:
flags.DEFINE_integer('node_report_state_interval', 10,
if 'compute_report_state_interval' not in FLAGS:
flags.DEFINE_integer('compute_report_state_interval', 10,
'seconds between nodes reporting state to cloud',
lower_bound=1)
logging.getLogger().setLevel(logging.DEBUG)
@ -75,10 +73,10 @@ def main():
bin_name = os.path.basename(__file__)
pulse = task.LoopingCall(n.report_state, FLAGS.node_name, bin_name)
pulse.start(interval=FLAGS.node_report_state_interval, now=False)
pulse.start(interval=FLAGS.compute_report_state_interval, now=False)
injected = consumer_all.attach_to_twisted()
injected = consumer_node.attach_to_twisted()
consumer_all.attach_to_twisted()
consumer_node.attach_to_twisted()
# This is the parent service that twistd will be looking for when it
# parses this file, return it so that we can get it into globals below
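
As an aside, a minimal sketch of the guarded flag definition that the NOTE above calls for, assuming nova.flags behaves like gflags (re-defining an existing flag raises an error, so the definition must be skipped when twistd re-imports this file); the names are taken from the hunk:

from nova import flags

FLAGS = flags.FLAGS

# Only define the flag on first import; when twistd re-imports this file the
# flag already exists and defining it again would raise a duplicate-flag error.
if 'compute_report_state_interval' not in FLAGS:
    flags.DEFINE_integer('compute_report_state_interval', 10,
                         'seconds between nodes reporting state to cloud',
                         lower_bound=1)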

View File

@ -22,22 +22,37 @@
"""
import logging
from tornado import ioloop
import os
import sys
# NOTE(termie): kludge so that we can run this from the bin directory in the
# checkout without having to screw with paths
NOVA_PATH = os.path.join(os.path.dirname(os.path.dirname(__file__)), 'nova')
if os.path.exists(NOVA_PATH):
sys.path.insert(0, os.path.dirname(NOVA_PATH))
from twisted.internet import task
from twisted.application import service
from nova import flags
from nova import rpc
from nova import server
from nova import utils
from nova import twistd
from nova.volume import storage
FLAGS = flags.FLAGS
flags.DEFINE_integer('storage_report_state_interval', 10,
'seconds between broadcasting state to cloud',
lower_bound=1)
# NOTE(termie): This file will necessarily be re-imported under different
# context when the twistd.serve() call is made below so any
# flags we define here will have to be conditionally defined,
# flags defined by imported modules are safe.
if 'volume_report_state_interval' not in FLAGS:
flags.DEFINE_integer('volume_report_state_interval', 10,
'seconds between nodes reporting state to cloud',
lower_bound=1)
def main(argv):
def main():
logging.warn('Starting volume node')
bs = storage.BlockStore()
conn = rpc.Connection.instance()
@ -51,19 +66,29 @@ def main(argv):
topic='%s.%s' % (FLAGS.storage_topic, FLAGS.node_name),
proxy=bs)
io_inst = ioloop.IOLoop.instance()
scheduler = ioloop.PeriodicCallback(
lambda: bs.report_state(),
FLAGS.storage_report_state_interval * 1000,
io_loop=io_inst)
bin_name = os.path.basename(__file__)
pulse = task.LoopingCall(bs.report_state, FLAGS.node_name, bin_name)
pulse.start(interval=FLAGS.volume_report_state_interval, now=False)
injected = consumer_all.attachToTornado(io_inst)
injected = consumer_node.attachToTornado(io_inst)
scheduler.start()
io_inst.start()
consumer_all.attach_to_twisted()
consumer_node.attach_to_twisted()
# This is the parent service that twistd will be looking for when it
# parses this file, return it so that we can get it into globals below
application = service.Application(bin_name)
bs.setServiceParent(application)
return application
# NOTE(termie): When this script is executed from the commandline what it will
# actually do is tell the twistd application runner that it
# should run this file as a twistd application (see below).
if __name__ == '__main__':
utils.default_flagfile()
server.serve('nova-volume', main)
twistd.serve(__file__)
# NOTE(termie): When this script is loaded by the twistd application runner
# this code path will be executed and twistd will expect a
# variable named 'application' to be available, it will then
# handle starting it and stopping it.
if __name__ == '__builtin__':
application = main()
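
For comparison, a compressed sketch of the periodic-call change this file makes, with the removed tornado form shown as comments; the names (bs, bin_name, the FLAGS values) are taken from the hunks above:

from twisted.internet import task

# Old (tornado): a PeriodicCallback driven by the IOLoop, interval in milliseconds.
#   scheduler = ioloop.PeriodicCallback(
#       lambda: bs.report_state(),
#       FLAGS.storage_report_state_interval * 1000,
#       io_loop=io_inst)
#   scheduler.start()

# New (twisted): a LoopingCall driven by the reactor, interval in seconds.
pulse = task.LoopingCall(bs.report_state, FLAGS.node_name, bin_name)
pulse.start(interval=FLAGS.volume_report_state_interval, now=False)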

View File

@ -1,4 +1,3 @@
--daemonize=1
--ca_path=/var/lib/nova/CA
--keys_path=/var/lib/nova/keys
--images_path=/var/lib/nova/images

View File

@ -16,8 +16,7 @@ import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.append(os.path.abspath('/Users/jmckenty/Projects/cc'))
sys.path.append([os.path.abspath('../nova'),os.path.abspath('../'),os.path.abspath('../vendor')])
sys.path.append([os.path.abspath('../nova'), os.path.abspath('..'), os.path.abspath('../bin')])
# -- General configuration -----------------------------------------------------
@ -25,7 +24,6 @@ sys.path.append([os.path.abspath('../nova'),os.path.abspath('../'),os.path.abspa
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.intersphinx', 'sphinx.ext.todo', 'sphinx.ext.coverage', 'sphinx.ext.pngmath', 'sphinx.ext.ifconfig']
#sphinx_to_github = False
todo_include_todos = True
# Add any paths that contain templates here, relative to this directory.
@ -68,7 +66,7 @@ release = '0.42'
# List of directories, relative to source directory, that shouldn't be searched
# for source files.
exclude_trees = ['_build']
exclude_trees = []
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
@ -176,7 +174,7 @@ htmlhelp_basename = 'novadoc'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'nova.tex', u'nova Documentation',
('index', 'Nova.tex', u'Nova Documentation',
u'Anso Labs, LLC', 'manual'),
]
@ -199,4 +197,6 @@ latex_documents = [
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'http://docs.python.org/': None}
intersphinx_mapping = {'python': ('http://docs.python.org/', None),
'swift': ('http://swift.openstack.org', None)}

View File

@ -43,7 +43,6 @@ Contents:
nova
fakes
binaries
todo
modules
packages

View File

@ -24,7 +24,7 @@ export VPN_IP=`ifconfig | grep 'inet addr:'| grep -v '127.0.0.1' | cut -d: -f2
export BROADCAST=`ifconfig | grep 'inet addr:'| grep -v '127.0.0.1' | cut -d: -f3 | awk '{print $1}'`
export DHCP_MASK=`ifconfig | grep 'inet addr:'| grep -v '127.0.0.1' | cut -d: -f4 | awk '{print $1}'`
export GATEWAY=`netstat -r | grep default | cut -d' ' -f10`
export SUBJ=/C=US/ST=California/L=Mountain View/O=Anso Labs/OU=Nova Dev/CN=customer-vpn-$VPN_IP
export SUBJ="/C=US/ST=California/L=MountainView/O=AnsoLabs/OU=NovaDev/CN=customer-vpn-$VPN_IP"
DHCP_LOWER=`echo $BROADCAST | awk -F. '{print $1"."$2"."$3"." $4 - 10 }'`
DHCP_UPPER=`echo $BROADCAST | awk -F. '{print $1"."$2"."$3"." $4 - 1 }'`

View File

@ -40,7 +40,8 @@ def partition(infile, outfile, local_bytes=0, local_type='ext2', execute=None):
formatted as ext2.
In the diagram below, dashes represent drive sectors.
0 a b c d e
+-----+------. . .-------+------. . .------+
| 0 a| b c|d e|
+-----+------. . .-------+------. . .------+
| mbr | primary partition | local partition |
+-----+------. . .-------+------. . .------+
@ -64,8 +65,8 @@ def partition(infile, outfile, local_bytes=0, local_type='ext2', execute=None):
last_sector = local_last # e
# create an empty file
execute('dd if=/dev/zero of=%s count=1 seek=%d bs=%d'
% (outfile, last_sector, sector_size))
yield execute('dd if=/dev/zero of=%s count=1 seek=%d bs=%d'
% (outfile, last_sector, sector_size))
# make mbr partition
yield execute('parted --script %s mklabel msdos' % outfile)
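
A note on why the dd call gains a yield: inside an @defer.inlineCallbacks generator each execute(...) returns a Deferred, and yielding it pauses the generator until the shell command finishes. A trimmed sketch of that pattern follows; build_image is a placeholder name, the command strings come from the hunk, and execute is assumed to be the command runner passed into partition() above:

from twisted.internet import defer

@defer.inlineCallbacks
def build_image(execute, outfile, last_sector, sector_size):
    # Allocate the image file by writing a single block at the last sector.
    yield execute('dd if=/dev/zero of=%s count=1 seek=%d bs=%d'
                  % (outfile, last_sector, sector_size))
    # Then label the image with an msdos partition table before partitioning.
    yield execute('parted --script %s mklabel msdos' % outfile)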

View File

@ -223,16 +223,20 @@ class Node(object, service.Service):
volume_id = None, mountpoint = None):
volume = storage.get_volume(volume_id)
yield self._init_aoe()
yield utils.runthis("Attached Volume: %s",
"sudo virsh attach-disk %s /dev/etherd/%s %s"
% (instance_id, volume['aoe_device'], mountpoint.split("/")[-1]))
yield process.simple_execute(
"sudo virsh attach-disk %s /dev/etherd/%s %s" %
(instance_id,
volume['aoe_device'],
mountpoint.rpartition('/dev/')[2]))
volume.finish_attach()
defer.returnValue(True)
@defer.inlineCallbacks
def _init_aoe(self):
utils.runthis("Doin an AoE discover, returns %s", "sudo aoe-discover")
utils.runthis("Doin an AoE stat, returns %s", "sudo aoe-stat")
yield process.simple_execute("sudo aoe-discover")
yield process.simple_execute("sudo aoe-stat")
@defer.inlineCallbacks
@exception.wrap_exception
def detach_volume(self, instance_id, volume_id):
""" detach a volume from an instance """
@ -240,10 +244,10 @@ class Node(object, service.Service):
# name without the leading /dev/
volume = storage.get_volume(volume_id)
target = volume['mountpoint'].rpartition('/dev/')[2]
utils.runthis("Detached Volume: %s", "sudo virsh detach-disk %s %s "
% (instance_id, target))
yield process.simple_execute(
"sudo virsh detach-disk %s %s " % (instance_id, target))
volume.finish_detach()
return defer.succeed(True)
defer.returnValue(True)
class Group(object):
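
One more aside on the detach_volume change above: a Python 2 inlineCallbacks generator cannot use a plain `return value`, so the result is delivered with defer.returnValue(). A sketch using names from the hunk (simple_execute stands in for process.simple_execute):

from twisted.internet import defer

@defer.inlineCallbacks
def detach_volume_sketch(simple_execute, instance_id, target):
    # Wait for the virsh command to complete before reporting success.
    yield simple_execute(
        "sudo virsh detach-disk %s %s " % (instance_id, target))
    # The result is delivered via defer.returnValue() rather than `return`.
    defer.returnValue(True)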

View File

@ -453,21 +453,21 @@ class CloudController(object):
def format_addresses(self, context):
addresses = []
# TODO(vish): move authorization checking into network.py
for address in self.network.host_objs:
#logging.debug(address_record)
address_rv = {
'public_ip': address['address'],
'instance_id' : address.get('instance_id', 'free')
}
if context.user.is_admin():
address_rv['instance_id'] = "%s (%s, %s)" % (
address['instance_id'],
address['user_id'],
address['project_id'],
)
# TODO(vish): implement a by_project iterator for addresses
if (context.user.is_admin() or
address['project_id'] == self.project.id):
address_rv = {
'public_ip': address['address'],
'instance_id' : address.get('instance_id', 'free')
}
if context.user.is_admin():
address_rv['instance_id'] = "%s (%s, %s)" % (
address['instance_id'],
address['user_id'],
address['project_id'],
)
addresses.append(address_rv)
# logging.debug(addresses)
return {'addressesSet': addresses}
@rbac.allow('netadmin')
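
Purely as a hypothetical sketch of the TODO(vish) above, no such helper exists in this commit: a by_project filter over the address records, using the same dict keys the hunk reads:

def addresses_by_project(host_objs, project_id):
    """Yield only the address records that belong to one project."""
    for address in host_objs:
        if address.get('project_id') == project_id:
            yield address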

View File

@ -44,6 +44,9 @@ class Duplicate(Error):
class NotAuthorized(Error):
pass
class NotEmpty(Error):
pass
def wrap_exception(f):
def _wrap(*args, **kw):
try:

View File

@ -107,7 +107,7 @@ class Bucket(object):
try:
return context.user.is_admin() or self.owner_id == context.project.id
except Exception, e:
pass
return False
def list_keys(self, prefix='', marker=None, max_keys=1000, terse=False):
object_names = []
@ -161,7 +161,7 @@ class Bucket(object):
def delete(self):
if len(os.listdir(self.path)) > 0:
raise exception.NotAuthorized()
raise exception.NotEmpty()
os.rmdir(self.path)
os.remove(self.path+'.json')

View File

@ -277,12 +277,12 @@ class ImageResource(Resource):
def render_POST(self, request):
""" update image attributes: public/private """
image_id = self.get_argument('image_id', u'')
operation = self.get_argument('operation', u'')
image_id = get_argument(request, 'image_id', u'')
operation = get_argument(request, 'operation', u'')
image_object = image.Image(image_id)
if not image.is_authorized(request.context):
if not image_object.is_authorized(request.context):
raise exception.NotAuthorized
image_object.set_public(operation=='add')
@ -291,10 +291,10 @@ class ImageResource(Resource):
def render_DELETE(self, request):
""" delete a registered image """
image_id = self.get_argument("image_id", u"")
image_id = get_argument(request, "image_id", u"")
image_object = image.Image(image_id)
if not image.is_authorized(request.context):
if not image_object.is_authorized(request.context):
raise exception.NotAuthorized
image_object.delete()

View File

@ -151,6 +151,7 @@ class TopicPublisher(Publisher):
def __init__(self, connection=None, topic="broadcast"):
self.routing_key = topic
self.exchange = FLAGS.control_exchange
self.durable = False
super(TopicPublisher, self).__init__(connection=connection)
@ -242,7 +243,7 @@ def send_message(topic, message, wait=True):
consumer.register_callback(generic_response)
publisher = messaging.Publisher(connection=Connection.instance(),
exchange="nova",
exchange=FLAGS.control_exchange,
exchange_type="topic",
routing_key=topic)
publisher.send(message)

View File

@ -142,7 +142,7 @@ class NetworkTestCase(test.TrialTestCase):
self.dnsmasq.release_ip(mac3, address3, hostname, net.bridge_name)
net = network.get_project_network(self.projects[0].id, "default")
rv = network.deallocate_ip(secondaddress)
self.dnsmasq.release_ip(mac, address, hostname, net.bridge_name)
self.dnsmasq.release_ip(mac, secondaddress, hostname, net.bridge_name)
def test_release_before_deallocate(self):
pass

View File

@ -27,6 +27,7 @@ from nova import flags
from nova import objectstore
from nova import test
from nova.auth import manager
from nova.exception import NotEmpty, NotFound, NotAuthorized
FLAGS = flags.FLAGS
@ -95,49 +96,37 @@ class ObjectStoreTestCase(test.BaseTestCase):
# another user is not authorized
self.context.user = self.um.get_user('user2')
self.context.project = self.um.get_project('proj2')
self.assert_(bucket.is_authorized(self.context) == False)
self.assertFalse(bucket.is_authorized(self.context))
# admin is authorized to use bucket
self.context.user = self.um.get_user('admin_user')
self.context.project = None
self.assert_(bucket.is_authorized(self.context))
self.assertTrue(bucket.is_authorized(self.context))
# new buckets are empty
self.assert_(bucket.list_keys()['Contents'] == [])
self.assertTrue(bucket.list_keys()['Contents'] == [])
# storing keys works
bucket['foo'] = "bar"
self.assert_(len(bucket.list_keys()['Contents']) == 1)
self.assertEquals(len(bucket.list_keys()['Contents']), 1)
self.assert_(bucket['foo'].read() == 'bar')
self.assertEquals(bucket['foo'].read(), 'bar')
# md5 of key works
self.assert_(bucket['foo'].md5 == hashlib.md5('bar').hexdigest())
self.assertEquals(bucket['foo'].md5, hashlib.md5('bar').hexdigest())
# deleting non-empty bucket throws exception
exception = False
try:
bucket.delete()
except:
exception = True
self.assert_(exception)
# deleting non-empty bucket should throw a NotEmpty exception
self.assertRaises(NotEmpty, bucket.delete)
# deleting key
del bucket['foo']
# deleting empty button
# deleting empty bucket
bucket.delete()
# accessing deleted bucket throws exception
exception = False
try:
objectstore.bucket.Bucket('new_bucket')
except:
exception = True
self.assert_(exception)
self.assertRaises(NotFound, objectstore.bucket.Bucket, 'new_bucket')
def test_images(self):
self.context.user = self.um.get_user('user1')
@ -166,37 +155,4 @@ class ObjectStoreTestCase(test.BaseTestCase):
# verify image permissions
self.context.user = self.um.get_user('user2')
self.context.project = self.um.get_project('proj2')
self.assert_(my_img.is_authorized(self.context) == False)
# class ApiObjectStoreTestCase(test.BaseTestCase):
# def setUp(self):
# super(ApiObjectStoreTestCase, self).setUp()
# FLAGS.fake_users = True
# FLAGS.buckets_path = os.path.join(tempdir, 'buckets')
# FLAGS.images_path = os.path.join(tempdir, 'images')
# FLAGS.ca_path = os.path.join(os.path.dirname(__file__), 'CA')
#
# self.users = manager.AuthManager()
# self.app = handler.Application(self.users)
#
# self.host = '127.0.0.1'
#
# self.conn = boto.s3.connection.S3Connection(
# aws_access_key_id=user.access,
# aws_secret_access_key=user.secret,
# is_secure=False,
# calling_format=boto.s3.connection.OrdinaryCallingFormat(),
# port=FLAGS.s3_port,
# host=FLAGS.s3_host)
#
# self.mox.StubOutWithMock(self.ec2, 'new_http_connection')
#
# def tearDown(self):
# FLAGS.Reset()
# super(ApiObjectStoreTestCase, self).tearDown()
#
# def test_describe_instances(self):
# self.expect_http()
# self.mox.ReplayAll()
#
# self.assertEqual(self.ec2.get_all_instances(), [])
self.assertFalse(my_img.is_authorized(self.context))

View File

@ -28,15 +28,17 @@ import os
import shutil
import socket
import tempfile
import time
from tornado import ioloop
from twisted.application import service
from twisted.internet import defer
from nova import datastore
from nova import exception
from nova import flags
from nova import process
from nova import utils
from nova import validate
from nova.compute import model
FLAGS = flags.FLAGS
@ -80,7 +82,7 @@ def get_volume(volume_id):
return volume_class(volume_id=volume_id)
raise exception.Error("Volume does not exist")
class BlockStore(object):
class BlockStore(object, service.Service):
"""
There is one BlockStore running on each volume node.
However, each BlockStore can report on the state of
@ -102,9 +104,21 @@ class BlockStore(object):
except Exception, err:
pass
def report_state(self):
#TODO: aggregate the state of the system
pass
@defer.inlineCallbacks
def report_state(self, nodename, daemon):
# TODO(termie): make this pattern be more elegant. -todd
try:
record = model.Daemon(nodename, daemon)
record.heartbeat()
if getattr(self, "model_disconnected", False):
self.model_disconnected = False
logging.error("Recovered model server connection!")
except model.ConnectionError, ex:
if not getattr(self, "model_disconnected", False):
self.model_disconnected = True
logging.exception("model server went away")
yield
@validate.rangetest(size=(0, 1000))
def create_volume(self, size, user_id, project_id):
@ -143,17 +157,24 @@ class BlockStore(object):
datastore.Redis.instance().srem('volumes:%s' % (FLAGS.storage_name), vol['volume_id'])
return True
@defer.inlineCallbacks
def _restart_exports(self):
if FLAGS.fake_storage:
return
utils.runthis("Setting exports to auto: %s", "sudo vblade-persist auto all")
utils.runthis("Starting all exports: %s", "sudo vblade-persist start all")
yield process.simple_execute(
"sudo vblade-persist auto all")
yield process.simple_execute(
"sudo vblade-persist start all")
@defer.inlineCallbacks
def _init_volume_group(self):
if FLAGS.fake_storage:
return
utils.runthis("PVCreate returned: %s", "sudo pvcreate %s" % (FLAGS.storage_dev))
utils.runthis("VGCreate returned: %s", "sudo vgcreate %s %s" % (FLAGS.volume_group, FLAGS.storage_dev))
yield process.simple_execute(
"sudo pvcreate %s" % (FLAGS.storage_dev))
yield process.simple_execute(
"sudo vgcreate %s %s" % (FLAGS.volume_group,
FLAGS.storage_dev))
class Volume(datastore.BasicModel):
@ -227,15 +248,22 @@ class Volume(datastore.BasicModel):
self._delete_lv()
super(Volume, self).destroy()
@defer.inlineCallbacks
def create_lv(self):
if str(self['size']) == '0':
sizestr = '100M'
else:
sizestr = '%sG' % self['size']
utils.runthis("Creating LV: %s", "sudo lvcreate -L %s -n %s %s" % (sizestr, self['volume_id'], FLAGS.volume_group))
yield process.simple_execute(
"sudo lvcreate -L %s -n %s %s" % (sizestr,
self['volume_id'],
FLAGS.volume_group))
@defer.inlineCallbacks
def _delete_lv(self):
utils.runthis("Removing LV: %s", "sudo lvremove -f %s/%s" % (FLAGS.volume_group, self['volume_id']))
yield process.simple_execute(
"sudo lvremove -f %s/%s" % (FLAGS.volume_group,
self['volume_id']))
def _setup_export(self):
(shelf_id, blade_id) = get_next_aoe_numbers()
@ -245,8 +273,9 @@ class Volume(datastore.BasicModel):
self.save()
self._exec_export()
@defer.inlineCallbacks
def _exec_export(self):
utils.runthis("Creating AOE export: %s",
yield process.simple_execute(
"sudo vblade-persist setup %s %s %s /dev/%s/%s" %
(self['shelf_id'],
self['blade_id'],
@ -254,9 +283,14 @@ class Volume(datastore.BasicModel):
FLAGS.volume_group,
self['volume_id']))
@defer.inlineCallbacks
def _remove_export(self):
utils.runthis("Stopped AOE export: %s", "sudo vblade-persist stop %s %s" % (self['shelf_id'], self['blade_id']))
utils.runthis("Destroyed AOE export: %s", "sudo vblade-persist destroy %s %s" % (self['shelf_id'], self['blade_id']))
yield process.simple_execute(
"sudo vblade-persist stop %s %s" % (self['shelf_id'],
self['blade_id']))
yield process.simple_execute(
"sudo vblade-persist destroy %s %s" % (self['shelf_id'],
self['blade_id']))
class FakeVolume(Volume):
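
A brief note on the report_state heartbeat added above: the model_disconnected flag is what keeps a flapping datastore from producing one traceback per pulse; the service logs a single "went away" and a single "Recovered" message instead. A trimmed sketch of the same pattern, with names taken from the hunk and model.Daemon / model.ConnectionError assumed to behave as in this commit:

import logging

from twisted.internet import defer

from nova.compute import model

@defer.inlineCallbacks
def report_state(self, nodename, daemon):
    try:
        # Touch the Daemon record so the cloud sees this node as alive.
        model.Daemon(nodename, daemon).heartbeat()
        if getattr(self, "model_disconnected", False):
            self.model_disconnected = False
            logging.error("Recovered model server connection!")
    except model.ConnectionError:
        # Log the outage once, then stay quiet until the connection returns.
        if not getattr(self, "model_disconnected", False):
            self.model_disconnected = True
            logging.exception("model server went away")
    yield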

View File

@ -1,4 +1,4 @@
[build_sphinx]
source-dir = docs
build-dir = docs/_build
source-dir = doc/source
build-dir = doc/build
all_files = 1