renamed xxxnode to xxxservice

This commit is contained in:
Vishvananda Ishaya
2010-07-23 15:27:18 -07:00
parent 1775720bde
commit 385b300bc7
8 changed files with 63 additions and 167 deletions

View File

@@ -22,11 +22,11 @@
"""
from nova import twistd
from nova.compute import computenode
from nova.compute import computeservice
if __name__ == '__main__':
twistd.serve(__file__)
if __name__ == '__builtin__':
application = computenode.ComputeNode.create()
application = computeservice.ComputeService.create()

View File

@@ -22,11 +22,11 @@
"""
from nova import twistd
from nova.network import networknode
from nova.network import networkservice
if __name__ == '__main__':
twistd.serve(__file__)
if __name__ == '__builtin__':
application = networknode.NetworkNode.create()
application = networkservice.NetworkService.create()

View File

@@ -22,11 +22,11 @@
"""
from nova import twistd
from nova.volume import volumenode
from nova.volume import volumeservice
if __name__ == '__main__':
twistd.serve(__file__)
if __name__ == '__builtin__':
application = volumenode.VolumeNode.create()
application = volumeservice.VolumeService.create()

View File

@@ -23,7 +23,6 @@ datastore.
"""
import base64
import json
import logging
import os
import time
@@ -38,9 +37,9 @@ from nova.auth import rbac
from nova.auth import users
from nova.compute import model
from nova.compute import network
from nova.compute import computenode
from nova.compute import computeservice
from nova.endpoint import images
from nova.volume import volumenode
from nova.volume import volumeservice
FLAGS = flags.FLAGS
@@ -76,7 +75,7 @@ class CloudController(object):
def volumes(self):
""" returns a list of all volumes """
for volume_id in datastore.Redis.instance().smembers("volumes"):
volume = volumenode.get_volume(volume_id)
volume = volumeservice.get_volume(volume_id)
yield volume
def __str__(self):
@@ -103,7 +102,7 @@ class CloudController(object):
result = {}
for instance in self.instdir.all:
if instance['project_id'] == project_id:
line = '%s slots=%d' % (instance['private_dns_name'], computenode.INSTANCE_TYPES[instance['instance_type']]['vcpus'])
line = '%s slots=%d' % (instance['private_dns_name'], computeservice.INSTANCE_TYPES[instance['instance_type']]['vcpus'])
if instance['key_name'] in result:
result[instance['key_name']].append(line)
else:
@@ -296,7 +295,7 @@ class CloudController(object):
@rbac.allow('projectmanager', 'sysadmin')
def create_volume(self, context, size, **kwargs):
# TODO(vish): refactor this to create the volume object here and tell volumenode to create it
# TODO(vish): refactor this to create the volume object here and tell volumeservice to create it
res = rpc.call(FLAGS.volume_topic, {"method": "create_volume",
"args" : {"size": size,
"user_id": context.user.id,
@@ -331,7 +330,7 @@ class CloudController(object):
raise exception.NotFound('Instance %s could not be found' % instance_id)
def _get_volume(self, context, volume_id):
volume = volumenode.get_volume(volume_id)
volume = volumeservice.get_volume(volume_id)
if context.user.is_admin() or volume['project_id'] == context.project.id:
return volume
raise exception.NotFound('Volume %s could not be found' % volume_id)
@@ -578,7 +577,7 @@ class CloudController(object):
"args": {"instance_id" : inst.instance_id}})
logging.debug("Casting to node for %s's instance with IP of %s" %
(context.user.name, inst['private_dns_name']))
# TODO: Make the NetworkComputeNode figure out the network name from ip.
# TODO: Make Network figure out the network name from ip.
return defer.succeed(self._format_instances(
context, reservation_id))

View File

@@ -1,103 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Generic Node baseclass for all workers that run on hosts
"""
import inspect
import logging
import os
from twisted.internet import defer
from twisted.internet import task
from twisted.application import service
from nova import datastore
from nova import flags
from nova import rpc
from nova.compute import model
FLAGS = flags.FLAGS
flags.DEFINE_integer('report_interval', 10,
'seconds between nodes reporting state to cloud',
lower_bound=1)
class Node(object, service.Service):
    """Base class for workers that run on hosts.

    Wires a worker into Twisted: creates AMQP consumers for its topic,
    starts a periodic heartbeat via report_state, and returns a twistd
    Application object for the launcher script to expose.
    """

    @classmethod
    def create(cls,
               report_interval=None,  # defaults to flag
               bin_name=None,  # defaults to basename of executable
               topic=None):  # defaults to basename - "nova-" part
        """Instantiates class and passes back application object"""
        if not report_interval:
            # NOTE(vish): set here because if it is set to flag in the
            # parameter list, it wrongly uses the default
            report_interval = FLAGS.report_interval
        # NOTE(vish): magic to automatically determine bin_name and topic
        if not bin_name:
            # inspect.stack()[-1][1] is the filename of the outermost
            # stack frame, i.e. the executable that started this process
            bin_name = os.path.basename(inspect.stack()[-1][1])
        if not topic:
            # e.g. bin_name "nova-compute" yields topic "compute"
            topic = bin_name.rpartition("nova-")[2]
        logging.warn("Starting %s node" % topic)
        node_instance = cls()
        conn = rpc.Connection.instance()
        # Consumer for messages broadcast to all nodes serving this topic.
        consumer_all = rpc.AdapterConsumer(
                connection=conn,
                topic='%s' % topic,
                proxy=node_instance)
        # Consumer for messages addressed specifically to this node.
        consumer_node = rpc.AdapterConsumer(
                connection=conn,
                topic='%s.%s' % (topic, FLAGS.node_name),
                proxy=node_instance)
        # Periodic heartbeat; interval is controlled by --report_interval
        # ('seconds between nodes reporting state to cloud').
        pulse = task.LoopingCall(node_instance.report_state,
                                 FLAGS.node_name,
                                 bin_name)
        pulse.start(interval=report_interval, now=False)
        consumer_all.attach_to_twisted()
        consumer_node.attach_to_twisted()
        # This is the parent service that twistd will be looking for when it
        # parses this file, return it so that we can get it into globals below
        application = service.Application(bin_name)
        node_instance.setServiceParent(application)
        return application

    @defer.inlineCallbacks
    def report_state(self, nodename, daemon):
        """Record a heartbeat for this daemon in the datastore.

        Logs the datastore outage once on disconnect and once on
        recovery, rather than on every failed interval.
        """
        # TODO(termie): make this pattern be more elegant. -todd
        try:
            record = model.Daemon(nodename, daemon)
            record.heartbeat()
            if getattr(self, "model_disconnected", False):
                self.model_disconnected = False
                logging.error("Recovered model server connection!")
        except datastore.ConnectionError, ex:
            # Only log the first failure to avoid flooding the logs while
            # the datastore stays unreachable.
            if not getattr(self, "model_disconnected", False):
                self.model_disconnected = True
                logging.exception("model server went away")
        # bare yield keeps this a generator for @defer.inlineCallbacks
        yield

View File

@@ -28,7 +28,7 @@ from nova import flags
from nova import rpc
from nova import test
from nova.auth import users
from nova.compute import computenode
from nova.compute import computeservice
from nova.endpoint import api
from nova.endpoint import cloud
@@ -53,12 +53,12 @@ class CloudTestCase(test.BaseTestCase):
proxy=self.cloud)
self.injected.append(self.cloud_consumer.attach_to_tornado(self.ioloop))
# set up a node
self.node = computenode.ComputeNode()
self.node_consumer = rpc.AdapterConsumer(connection=self.conn,
# set up a service
self.compute = computeservice.ComputeService()
self.compute_consumer = rpc.AdapterConsumer(connection=self.conn,
topic=FLAGS.compute_topic,
proxy=self.node)
self.injected.append(self.node_consumer.attach_to_tornado(self.ioloop))
proxy=self.compute)
self.injected.append(self.compute_consumer.attach_to_tornado(self.ioloop))
try:
users.UserManager.instance().create_user('admin', 'admin', 'admin')
@@ -76,11 +76,11 @@ class CloudTestCase(test.BaseTestCase):
logging.debug("Can't test instances without a real virtual env.")
return
instance_id = 'foo'
inst = yield self.node.run_instance(instance_id)
inst = yield self.compute.run_instance(instance_id)
output = yield self.cloud.get_console_output(self.context, [instance_id])
logging.debug(output)
self.assert_(output)
rv = yield self.node.terminate_instance(instance_id)
rv = yield self.compute.terminate_instance(instance_id)
def test_run_instances(self):
if FLAGS.fake_libvirt:
@@ -112,7 +112,7 @@ class CloudTestCase(test.BaseTestCase):
# for instance in reservations[res_id]:
for instance in reservations[reservations.keys()[0]]:
logging.debug("Terminating instance %s" % instance['instance_id'])
rv = yield self.node.terminate_instance(instance['instance_id'])
rv = yield self.compute.terminate_instance(instance['instance_id'])
def test_instance_update_state(self):
def instance(num):

View File

@@ -26,7 +26,7 @@ from nova import flags
from nova import test
from nova import utils
from nova.compute import model
from nova.compute import computenode
from nova.compute import computeservice
FLAGS = flags.FLAGS
@@ -60,7 +60,7 @@ class ComputeConnectionTestCase(test.TrialTestCase):
self.flags(fake_libvirt=True,
fake_storage=True,
fake_users=True)
self.node = computenode.ComputeNode()
self.compute = computeservice.ComputeService()
def create_instance(self):
instdir = model.InstanceDirectory()
@@ -81,48 +81,48 @@ class ComputeConnectionTestCase(test.TrialTestCase):
def test_run_describe_terminate(self):
instance_id = self.create_instance()
rv = yield self.node.run_instance(instance_id)
rv = yield self.compute.run_instance(instance_id)
rv = yield self.node.describe_instances()
rv = yield self.compute.describe_instances()
logging.info("Running instances: %s", rv)
self.assertEqual(rv[instance_id].name, instance_id)
rv = yield self.node.terminate_instance(instance_id)
rv = yield self.compute.terminate_instance(instance_id)
rv = yield self.node.describe_instances()
rv = yield self.compute.describe_instances()
logging.info("After terminating instances: %s", rv)
self.assertEqual(rv, {})
@defer.inlineCallbacks
def test_reboot(self):
instance_id = self.create_instance()
rv = yield self.node.run_instance(instance_id)
rv = yield self.compute.run_instance(instance_id)
rv = yield self.node.describe_instances()
rv = yield self.compute.describe_instances()
self.assertEqual(rv[instance_id].name, instance_id)
yield self.node.reboot_instance(instance_id)
yield self.compute.reboot_instance(instance_id)
rv = yield self.node.describe_instances()
rv = yield self.compute.describe_instances()
self.assertEqual(rv[instance_id].name, instance_id)
rv = yield self.node.terminate_instance(instance_id)
rv = yield self.compute.terminate_instance(instance_id)
@defer.inlineCallbacks
def test_console_output(self):
instance_id = self.create_instance()
rv = yield self.node.run_instance(instance_id)
rv = yield self.compute.run_instance(instance_id)
console = yield self.node.get_console_output(instance_id)
console = yield self.compute.get_console_output(instance_id)
self.assert_(console)
rv = yield self.node.terminate_instance(instance_id)
rv = yield self.compute.terminate_instance(instance_id)
@defer.inlineCallbacks
def test_run_instance_existing(self):
instance_id = self.create_instance()
rv = yield self.node.run_instance(instance_id)
rv = yield self.compute.run_instance(instance_id)
rv = yield self.node.describe_instances()
rv = yield self.compute.describe_instances()
self.assertEqual(rv[instance_id].name, instance_id)
self.assertRaises(exception.Error, self.node.run_instance, instance_id)
rv = yield self.node.terminate_instance(instance_id)
self.assertRaises(exception.Error, self.compute.run_instance, instance_id)
rv = yield self.compute.terminate_instance(instance_id)

View File

@@ -21,8 +21,8 @@ import logging
from nova import exception
from nova import flags
from nova import test
from nova.compute import computenode
from nova.volume import volumenode
from nova.compute import computeservice
from nova.volume import volumeservice
FLAGS = flags.FLAGS
@@ -32,24 +32,24 @@ class VolumeTestCase(test.TrialTestCase):
def setUp(self):
logging.getLogger().setLevel(logging.DEBUG)
super(VolumeTestCase, self).setUp()
self.mynode = computenode.ComputeNode()
self.mystorage = None
self.compute = computeservice.ComputeService()
self.volume = None
self.flags(fake_libvirt=True,
fake_storage=True)
self.mystorage = volumenode.VolumeNode()
self.volume = volumeservice.VolumeService()
def test_run_create_volume(self):
vol_size = '0'
user_id = 'fake'
project_id = 'fake'
volume_id = self.mystorage.create_volume(vol_size, user_id, project_id)
volume_id = self.volume.create_volume(vol_size, user_id, project_id)
# TODO(termie): get_volume returns differently than create_volume
self.assertEqual(volume_id,
volumenode.get_volume(volume_id)['volume_id'])
volumeservice.get_volume(volume_id)['volume_id'])
rv = self.mystorage.delete_volume(volume_id)
rv = self.volume.delete_volume(volume_id)
self.assertRaises(exception.Error,
volumenode.get_volume,
volumeservice.get_volume,
volume_id)
def test_too_big_volume(self):
@@ -57,7 +57,7 @@ class VolumeTestCase(test.TrialTestCase):
user_id = 'fake'
project_id = 'fake'
self.assertRaises(TypeError,
self.mystorage.create_volume,
self.volume.create_volume,
vol_size, user_id, project_id)
def test_too_many_volumes(self):
@@ -68,26 +68,26 @@ class VolumeTestCase(test.TrialTestCase):
total_slots = FLAGS.slots_per_shelf * num_shelves
vols = []
for i in xrange(total_slots):
vid = self.mystorage.create_volume(vol_size, user_id, project_id)
vid = self.volume.create_volume(vol_size, user_id, project_id)
vols.append(vid)
self.assertRaises(volumenode.NoMoreVolumes,
self.mystorage.create_volume,
self.assertRaises(volumeservice.NoMoreVolumes,
self.volume.create_volume,
vol_size, user_id, project_id)
for id in vols:
self.mystorage.delete_volume(id)
self.volume.delete_volume(id)
def test_run_attach_detach_volume(self):
# Create one volume and one node to test with
# Create one volume and one compute to test with
instance_id = "storage-test"
vol_size = "5"
user_id = "fake"
project_id = 'fake'
mountpoint = "/dev/sdf"
volume_id = self.mystorage.create_volume(vol_size, user_id, project_id)
volume_id = self.volume.create_volume(vol_size, user_id, project_id)
volume_obj = volumenode.get_volume(volume_id)
volume_obj = volumeservice.get_volume(volume_id)
volume_obj.start_attach(instance_id, mountpoint)
rv = yield self.mynode.attach_volume(volume_id,
rv = yield self.compute.attach_volume(volume_id,
instance_id,
mountpoint)
self.assertEqual(volume_obj['status'], "in-use")
@@ -96,16 +96,16 @@ class VolumeTestCase(test.TrialTestCase):
self.assertEqual(volume_obj['mountpoint'], mountpoint)
self.assertRaises(exception.Error,
self.mystorage.delete_volume,
self.volume.delete_volume,
volume_id)
rv = yield self.mystorage.detach_volume(volume_id)
volume_obj = volumenode.get_volume(volume_id)
rv = yield self.volume.detach_volume(volume_id)
volume_obj = volumeservice.get_volume(volume_id)
self.assertEqual(volume_obj['status'], "available")
rv = self.mystorage.delete_volume(volume_id)
rv = self.volume.delete_volume(volume_id)
self.assertRaises(exception.Error,
volumenode.get_volume,
volumeservice.get_volume,
volume_id)
def test_multi_node(self):