merged trunk
bin/nova-api (18 lines changed)

@@ -22,11 +22,20 @@ Tornado daemon for the main API endpoint.
 """
 
 import logging
+import os
+import sys
 from tornado import httpserver
 from tornado import ioloop
 
+# If ../nova/__init__.py exists, add ../ to Python search path, so that
+# it will override what happens to be installed in /usr/(local/)lib/python...
+possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]),
+                                   os.pardir,
+                                   os.pardir))
+if os.path.exists(os.path.join(possible_topdir, 'nova', '__init__.py')):
+    sys.path.insert(0, possible_topdir)
+
 from nova import flags
-from nova import rpc
 from nova import server
 from nova import utils
 from nova.endpoint import admin
@@ -43,14 +52,7 @@ def main(_argv):
             'Admin': admin.AdminController()}
     _app = api.APIServerApplication(controllers)
 
-    conn = rpc.Connection.instance()
-    consumer = rpc.AdapterConsumer(connection=conn,
-                                   topic=FLAGS.cloud_topic,
-                                   proxy=controllers['Cloud'])
-
     io_inst = ioloop.IOLoop.instance()
-    _injected = consumer.attach_to_tornado(io_inst)
-
     http_server = httpserver.HTTPServer(_app)
     http_server.listen(FLAGS.cc_port)
     logging.debug('Started HTTP server on %s', FLAGS.cc_port)
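Note on the path block this commit adds to every bin/ script above and below: starting from the script's own path, two os.pardir hops land on the checkout root, and that root is only prepended to sys.path when it actually contains the nova package. A minimal standalone sketch of how it resolves (the /opt path is a made-up example, not from the commit):

    import os
    import sys

    script = '/opt/nova-checkout/bin/nova-api'   # hypothetical install location
    topdir = os.path.normpath(os.path.join(os.path.abspath(script),
                                           os.pardir,    # -> .../bin
                                           os.pardir))   # -> .../nova-checkout
    # Only a source checkout has <topdir>/nova/__init__.py, so a packaged
    # install keeps using whatever is already on the default sys.path.
    if os.path.exists(os.path.join(topdir, 'nova', '__init__.py')):
        sys.path.insert(0, topdir)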
@@ -21,6 +21,17 @@
 Nova API daemon.
 """
 
+import os
+import sys
+
+# If ../nova/__init__.py exists, add ../ to Python search path, so that
+# it will override what happens to be installed in /usr/(local/)lib/python...
+possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]),
+                                   os.pardir,
+                                   os.pardir))
+if os.path.exists(os.path.join(possible_topdir, 'nova', '__init__.py')):
+    sys.path.insert(0, possible_topdir)
+
 from nova import api
 from nova import flags
 from nova import utils
@@ -21,12 +21,23 @@
 Twistd daemon for the nova compute nodes.
 """
 
+import os
+import sys
+
+# If ../nova/__init__.py exists, add ../ to Python search path, so that
+# it will override what happens to be installed in /usr/(local/)lib/python...
+possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]),
+                                   os.pardir,
+                                   os.pardir))
+if os.path.exists(os.path.join(possible_topdir, 'nova', '__init__.py')):
+    sys.path.insert(0, possible_topdir)
+
+from nova import service
 from nova import twistd
-from nova.compute import service
 
 
 if __name__ == '__main__':
     twistd.serve(__file__)
 
 if __name__ == '__builtin__':
-    application = service.ComputeService.create()  # pylint: disable-msg=C0103
+    application = service.Service.create()  # pylint: disable=C0103
@@ -25,29 +25,40 @@ import logging
 import os
 import sys
 
-#TODO(joshua): there is concern that the user dnsmasq runs under will not
-#              have nova in the path. This should be verified and if it is
-#              not true the ugly line below can be removed
-sys.path.append(os.path.abspath(os.path.join(__file__, "../../")))
+# If ../nova/__init__.py exists, add ../ to Python search path, so that
+# it will override what happens to be installed in /usr/(local/)lib/python...
+possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]),
+                                   os.pardir,
+                                   os.pardir))
+if os.path.exists(os.path.join(possible_topdir, 'nova', '__init__.py')):
+    sys.path.insert(0, possible_topdir)
 
+from nova import db
 from nova import flags
 from nova import rpc
 from nova import utils
 from nova.network import linux_net
-from nova.network import model
-from nova.network import service
 
 FLAGS = flags.FLAGS
+flags.DECLARE('auth_driver', 'nova.auth.manager')
+flags.DECLARE('redis_db', 'nova.datastore')
+flags.DECLARE('network_size', 'nova.network.manager')
+flags.DECLARE('num_networks', 'nova.network.manager')
+flags.DECLARE('update_dhcp_on_disassociate', 'nova.network.manager')
 
 
-def add_lease(_mac, ip_address, _hostname, _interface):
+def add_lease(mac, ip_address, _hostname, _interface):
     """Set the IP that was assigned by the DHCP server."""
     if FLAGS.fake_rabbit:
-        service.VlanNetworkService().lease_ip(ip_address)
+        logging.debug("leasing ip")
+        network_manager = utils.import_object(FLAGS.network_manager)
+        network_manager.lease_fixed_ip(None, mac, ip_address)
     else:
-        rpc.cast("%s.%s" % (FLAGS.network_topic, FLAGS.node_name),
-                 {"method": "lease_ip",
-                  "args": {"fixed_ip": ip_address}})
+        rpc.cast("%s.%s" % (FLAGS.network_topic, FLAGS.host),
+                 {"method": "lease_fixed_ip",
+                  "args": {"context": None,
+                           "mac": mac,
+                           "address": ip_address}})
 
 
 def old_lease(_mac, _ip_address, _hostname, _interface):
@@ -55,23 +66,24 @@ def old_lease(_mac, _ip_address, _hostname, _interface):
     logging.debug("Adopted old lease or got a change of mac/hostname")
 
 
-def del_lease(_mac, ip_address, _hostname, _interface):
+def del_lease(mac, ip_address, _hostname, _interface):
     """Called when a lease expires."""
     if FLAGS.fake_rabbit:
-        service.VlanNetworkService().release_ip(ip_address)
+        logging.debug("releasing ip")
+        network_manager = utils.import_object(FLAGS.network_manager)
+        network_manager.release_fixed_ip(None, mac, ip_address)
     else:
-        rpc.cast("%s.%s" % (FLAGS.network_topic, FLAGS.node_name),
-                 {"method": "release_ip",
-                  "args": {"fixed_ip": ip_address}})
+        rpc.cast("%s.%s" % (FLAGS.network_topic, FLAGS.host),
+                 {"method": "release_fixed_ip",
+                  "args": {"context": None,
+                           "mac": mac,
+                           "address": ip_address}})
 
 
 def init_leases(interface):
     """Get the list of hosts for an interface."""
-    net = model.get_network_by_interface(interface)
-    res = ""
-    for address in net.assigned_objs:
-        res += "%s\n" % linux_net.host_dhcp(address)
-    return res
+    network_ref = db.network_get_by_bridge(None, interface)
+    return linux_net.get_dhcp_hosts(None, network_ref['id'])
 
 
 def main():
@@ -83,10 +95,16 @@ def main():
     if int(os.environ.get('TESTING', '0')):
         FLAGS.fake_rabbit = True
         FLAGS.redis_db = 8
-        FLAGS.network_size = 32
+        FLAGS.network_size = 16
        FLAGS.connection_type = 'fake'
         FLAGS.fake_network = True
         FLAGS.auth_driver = 'nova.auth.ldapdriver.FakeLdapDriver'
+        FLAGS.num_networks = 5
+        path = os.path.abspath(os.path.join(os.path.dirname(__file__),
+                                            '..',
+                                            '_trial_temp',
+                                            'nova.sqlite'))
+        FLAGS.sql_connection = 'sqlite:///%s' % path
     action = argv[1]
     if action in ['add', 'del', 'old']:
         mac = argv[2]
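For context on the nova-dhcpbridge hunks above: dnsmasq invokes the script with an action (add/del/old) plus the lease's MAC and address, and outside of fake-rabbit test mode the script now forwards the event to the network host over the message queue. A rough sketch of the cast it builds (topic and host values are placeholders; the real ones come from FLAGS.network_topic and FLAGS.host):

    mac = '02:16:3e:00:00:01'      # MAC handed to the script by dnsmasq (argv[2] in main() above)
    ip_address = '10.0.0.3'        # the leased address handed in by dnsmasq
    topic = '%s.%s' % ('network', 'network-host-1')   # FLAGS.network_topic, FLAGS.host

    message = {'method': 'lease_fixed_ip',            # or 'release_fixed_ip' on 'del'
               'args': {'context': None,
                        'mac': mac,
                        'address': ip_address}}
    # rpc.cast(topic, message)   # as in add_lease()/del_lease() above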
@@ -29,6 +29,14 @@ import subprocess
 import sys
 import urllib2
 
+# If ../nova/__init__.py exists, add ../ to Python search path, so that
+# it will override what happens to be installed in /usr/(local/)lib/python...
+possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]),
+                                   os.pardir,
+                                   os.pardir))
+if os.path.exists(os.path.join(possible_topdir, 'nova', '__init__.py')):
+    sys.path.insert(0, possible_topdir)
+
 from nova import flags
 from nova import utils
 from nova.objectstore import image
@@ -21,9 +21,19 @@
 Daemon for Nova RRD based instance resource monitoring.
 """
 
+import os
 import logging
+import sys
 from twisted.application import service
 
+# If ../nova/__init__.py exists, add ../ to Python search path, so that
+# it will override what happens to be installed in /usr/(local/)lib/python...
+possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]),
+                                   os.pardir,
+                                   os.pardir))
+if os.path.exists(os.path.join(possible_topdir, 'nova', '__init__.py')):
+    sys.path.insert(0, possible_topdir)
+
 from nova import twistd
 from nova.compute import monitor
 
bin/nova-manage (106 lines changed)

@@ -17,18 +17,60 @@
 # License for the specific language governing permissions and limitations
 # under the License.
 
+# Interactive shell based on Django:
+#
+# Copyright (c) 2005, the Lawrence Journal-World
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without modification,
+# are permitted provided that the following conditions are met:
+#
+#     1. Redistributions of source code must retain the above copyright notice,
+#        this list of conditions and the following disclaimer.
+#
+#     2. Redistributions in binary form must reproduce the above copyright
+#        notice, this list of conditions and the following disclaimer in the
+#        documentation and/or other materials provided with the distribution.
+#
+#     3. Neither the name of Django nor the names of its contributors may be used
+#        to endorse or promote products derived from this software without
+#        specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
 """
 CLI interface for nova management.
 Connects to the running ADMIN api in the api daemon.
 """
 
+import os
 import sys
 import time
 
+import IPy
+
+# If ../nova/__init__.py exists, add ../ to Python search path, so that
+# it will override what happens to be installed in /usr/(local/)lib/python...
+possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]),
+                                   os.pardir,
+                                   os.pardir))
+if os.path.exists(os.path.join(possible_topdir, 'nova', '__init__.py')):
+    sys.path.insert(0, possible_topdir)
+
+from nova import db
 from nova import flags
 from nova import utils
 from nova.auth import manager
-from nova.compute import model
 from nova.cloudpipe import pipelib
 from nova.endpoint import cloud
 
@@ -41,7 +83,6 @@ class VpnCommands(object):
 
     def __init__(self):
         self.manager = manager.AuthManager()
-        self.instdir = model.InstanceDirectory()
         self.pipe = pipelib.CloudPipe(cloud.CloudController())
 
     def list(self):
@@ -73,9 +114,8 @@ class VpnCommands(object):
 
     def _vpn_for(self, project_id):
         """Get the VPN instance for a project ID."""
-        for instance in self.instdir.all:
-            if ('image_id' in instance.state
-                and instance['image_id'] == FLAGS.vpn_image_id
+        for instance in db.instance_get_all():
+            if (instance['image_id'] == FLAGS.vpn_image_id
                 and not instance['state_description'] in
                     ['shutting_down', 'shutdown']
                 and instance['project_id'] == project_id):
@@ -94,6 +134,29 @@ class VpnCommands(object):
         self.pipe.launch_vpn_instance(project_id)
 
 
+class ShellCommands(object):
+    def run(self):
+        "Runs a Python interactive interpreter. Tries to use IPython, if it's available."
+        try:
+            import IPython
+            # Explicitly pass an empty list as arguments, because otherwise IPython
+            # would use sys.argv from this script.
+            shell = IPython.Shell.IPShell(argv=[])
+            shell.mainloop()
+        except ImportError:
+            import code
+            try:  # Try activating rlcompleter, because it's handy.
+                import readline
+            except ImportError:
+                pass
+            else:
+                # We don't have to wrap the following import in a 'try', because
+                # we already know 'readline' was imported successfully.
+                import rlcompleter
+                readline.parse_and_bind("tab:complete")
+            code.interact()
+
+
 class RoleCommands(object):
     """Class for managing roles."""
 
@@ -211,12 +274,45 @@ class ProjectCommands(object):
         with open(filename, 'w') as f:
             f.write(zip_file)
 
 
+class FloatingIpCommands(object):
+    """Class for managing floating ip."""
+
+    def create(self, host, range):
+        """Creates floating ips for host by range
+        arguments: host ip_range"""
+        for address in IPy.IP(range):
+            db.floating_ip_create(None, {'address': str(address),
+                                         'host': host})
+
+    def delete(self, ip_range):
+        """Deletes floating ips by range
+        arguments: range"""
+        for address in IPy.IP(ip_range):
+            db.floating_ip_destroy(None, str(address))
+
+    def list(self, host=None):
+        """Lists all floating ips (optionally by host)
+        arguments: [host]"""
+        if host == None:
+            floating_ips = db.floating_ip_get_all(None)
+        else:
+            floating_ips = db.floating_ip_get_all_by_host(None, host)
+        for floating_ip in floating_ips:
+            instance = None
+            if floating_ip['fixed_ip']:
+                instance = floating_ip['fixed_ip']['instance']['str_id']
+            print "%s\t%s\t%s" % (floating_ip['host'],
+                                  floating_ip['address'],
+                                  instance)
+
+
 CATEGORIES = [
     ('user', UserCommands),
     ('project', ProjectCommands),
     ('role', RoleCommands),
+    ('shell', ShellCommands),
     ('vpn', VpnCommands),
+    ('floating', FloatingIpCommands)
 ]
 
 
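The new FloatingIpCommands above lean on IPy to expand an address range into individual rows. A small sketch of what `nova-manage floating create <host> <range>` ends up iterating over (host name and range are example values):

    import IPy

    host = 'network-host-1'
    ip_range = '10.1.0.0/30'
    for address in IPy.IP(ip_range):
        # each address becomes one floating_ip row, as in create() above
        print {'address': str(address), 'host': host}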
@@ -21,17 +21,23 @@
 Twistd daemon for the nova network nodes.
 """
 
-from nova import flags
+import os
+import sys
+
+# If ../nova/__init__.py exists, add ../ to Python search path, so that
+# it will override what happens to be installed in /usr/(local/)lib/python...
+possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]),
+                                   os.pardir,
+                                   os.pardir))
+if os.path.exists(os.path.join(possible_topdir, 'nova', '__init__.py')):
+    sys.path.insert(0, possible_topdir)
+
+from nova import service
 from nova import twistd
-
-from nova.network import service
-
-FLAGS = flags.FLAGS
 
 
 if __name__ == '__main__':
     twistd.serve(__file__)
 
 if __name__ == '__builtin__':
-    # pylint: disable-msg=C0103
-    application = service.type_to_class(FLAGS.network_type).create()
+    application = service.Service.create()  # pylint: disable-msg=C0103
@@ -21,6 +21,17 @@
 Twisted daemon for nova objectstore. Supports S3 API.
 """
 
+import os
+import sys
+
+# If ../nova/__init__.py exists, add ../ to Python search path, so that
+# it will override what happens to be installed in /usr/(local/)lib/python...
+possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]),
+                                   os.pardir,
+                                   os.pardir))
+if os.path.exists(os.path.join(possible_topdir, 'nova', '__init__.py')):
+    sys.path.insert(0, possible_topdir)
+
 from nova import flags
 from nova import utils
 from nova import twistd
@@ -21,12 +21,23 @@
 Twistd daemon for the nova volume nodes.
 """
 
+import os
+import sys
+
+# If ../nova/__init__.py exists, add ../ to Python search path, so that
+# it will override what happens to be installed in /usr/(local/)lib/python...
+possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]),
+                                   os.pardir,
+                                   os.pardir))
+if os.path.exists(os.path.join(possible_topdir, 'nova', '__init__.py')):
+    sys.path.insert(0, possible_topdir)
+
+from nova import service
 from nova import twistd
-from nova.volume import service
 
 
 if __name__ == '__main__':
     twistd.serve(__file__)
 
 if __name__ == '__builtin__':
-    application = service.VolumeService.create()  # pylint: disable-msg=C0103
+    application = service.Service.create()  # pylint: disable-msg=C0103
bzrplugins/novalog/__init__.py (new file, 59 lines)

@@ -0,0 +1,59 @@
+# Copyright 2010 OpenStack LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Log format for Nova's changelog."""
+
+import bzrlib.log
+from bzrlib.osutils import format_date
+
+#
+# This is mostly stolen from bzrlib.log.GnuChangelogLogFormatter
+# The difference is that it logs the author rather than the committer
+# which for Nova always is Tarmac.
+#
+class NovaLogFormat(bzrlib.log.GnuChangelogLogFormatter):
+    preferred_levels = 1
+
+    def log_revision(self, revision):
+        """Log a revision, either merged or not."""
+        to_file = self.to_file
+
+        date_str = format_date(revision.rev.timestamp,
+                               revision.rev.timezone or 0,
+                               self.show_timezone,
+                               date_fmt='%Y-%m-%d',
+                               show_offset=False)
+
+        authors = revision.rev.get_apparent_authors()
+        to_file.write('%s %s\n\n' % (date_str, ", ".join(authors)))
+
+        if revision.delta is not None and revision.delta.has_changed():
+            for c in revision.delta.added + revision.delta.removed + revision.delta.modified:
+                path, = c[:1]
+                to_file.write('\t* %s:\n' % (path,))
+            for c in revision.delta.renamed:
+                oldpath,newpath = c[:2]
+                # For renamed files, show both the old and the new path
+                to_file.write('\t* %s:\n\t* %s:\n' % (oldpath,newpath))
+            to_file.write('\n')
+
+        if not revision.rev.message:
+            to_file.write('\tNo commit message\n')
+        else:
+            message = revision.rev.message.rstrip('\r\n')
+            for l in message.split('\n'):
+                to_file.write('\t%s\n' % (l.lstrip(),))
+            to_file.write('\n')
+
+bzrlib.log.register_formatter('novalog', NovaLogFormat)
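What the NovaLogFormat plugin above emits per revision is the GNU-ChangeLog-style block of its parent class, except the header line uses get_apparent_authors() rather than the committer (which for Nova's trunk is always Tarmac). Roughly, with invented date, name and paths as placeholders:

    2010-09-08 Some Author <author@example.com>

        * bin/nova-manage:
        * nova/db/api.py:

        First line of the commit message
        further lines, each stripped and tab-indented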
@@ -29,11 +29,11 @@ import uuid
 import zipfile
 
 from nova import crypto
+from nova import db
 from nova import exception
 from nova import flags
 from nova import utils
 from nova.auth import signer
-from nova.network import vpn
 
 
 FLAGS = flags.FLAGS
@@ -252,6 +252,7 @@ class AuthManager(object):
         __init__ is run every time AuthManager() is called, so we only
         reset the driver if it is not set or a new driver is specified.
         """
+        self.network_manager = utils.import_object(FLAGS.network_manager)
         if driver or not getattr(self, 'driver', None):
             self.driver = utils.import_class(driver or FLAGS.auth_driver)
 
@@ -493,8 +494,8 @@ class AuthManager(object):
             return []
         return [Project(**project_dict) for project_dict in project_list]
 
-    def create_project(self, name, manager_user,
-                       description=None, member_users=None):
+    def create_project(self, name, manager_user, description=None,
+                       member_users=None, context=None):
         """Create a project
 
         @type name: str
@@ -523,7 +524,14 @@ class AuthManager(object):
                                              description,
                                              member_users)
             if project_dict:
-                return Project(**project_dict)
+                project = Project(**project_dict)
+                try:
+                    self.network_manager.allocate_network(context,
+                                                          project.id)
+                except:
+                    drv.delete_project(project.id)
+                    raise
+                return project
 
     def add_to_project(self, user, project):
         """Add user to project"""
@@ -550,7 +558,7 @@ class AuthManager(object):
                                      Project.safe_id(project))
 
     @staticmethod
-    def get_project_vpn_data(project):
+    def get_project_vpn_data(project, context=None):
         """Gets vpn ip and port for project
 
         @type project: Project or project_id
@@ -560,15 +568,26 @@ class AuthManager(object):
         @return: A tuple containing (ip, port) or None, None if vpn has
         not been allocated for user.
         """
-        network_data = vpn.NetworkData.lookup(Project.safe_id(project))
-        if not network_data:
-            raise exception.NotFound('project network data has not been set')
-        return (network_data.ip, network_data.port)
+        network_ref = db.project_get_network(context,
+                                             Project.safe_id(project))
+
+        if not network_ref['vpn_public_port']:
+            raise exception.NotFound('project network data has not been set')
+        return (network_ref['vpn_public_address'],
+                network_ref['vpn_public_port'])
 
-    def delete_project(self, project):
+    def delete_project(self, project, context=None):
         """Deletes a project"""
+        try:
+            network_ref = db.project_get_network(context,
+                                                 Project.safe_id(project))
+            db.network_destroy(context, network_ref['id'])
+        except:
+            logging.exception('Could not destroy network for %s',
+                              project)
         with self.driver() as drv:
-            return drv.delete_project(Project.safe_id(project))
+            drv.delete_project(Project.safe_id(project))
 
     def get_user(self, uid):
         """Retrieves a user by id"""
@@ -703,15 +722,15 @@ class AuthManager(object):
         zippy.writestr(FLAGS.credential_key_file, private_key)
         zippy.writestr(FLAGS.credential_cert_file, signed_cert)
 
-        network_data = vpn.NetworkData.lookup(pid)
-        if network_data:
+        (vpn_ip, vpn_port) = self.get_project_vpn_data(project)
+        if vpn_ip:
             configfile = open(FLAGS.vpn_client_template, "r")
             s = string.Template(configfile.read())
             configfile.close()
             config = s.substitute(keyfile=FLAGS.credential_key_file,
                                   certfile=FLAGS.credential_cert_file,
-                                  ip=network_data.ip,
-                                  port=network_data.port)
+                                  ip=vpn_ip,
+                                  port=vpn_port)
             zippy.writestr(FLAGS.credential_vpn_file, config)
         else:
             logging.warn("No vpn data for project %s" %
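The create_project() change above pairs the driver-level project creation with a network allocation and a compensating delete if the allocation fails. The ordering, reduced to a sketch with explanatory comments (names follow the hunk; this is not new API):

    project = Project(**project_dict)        # created through the auth driver (drv)
    try:
        self.network_manager.allocate_network(context, project.id)
    except:
        drv.delete_project(project.id)       # roll back the half-created project
        raise
    return project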
@@ -26,10 +26,7 @@ before trying to run this.
 import logging
 import redis
 
-from nova import exception
 from nova import flags
-from nova import utils
-
 
 FLAGS = flags.FLAGS
 flags.DEFINE_string('redis_host', '127.0.0.1',
@@ -54,209 +51,3 @@ class Redis(object):
         return cls._instance
 
 
-class ConnectionError(exception.Error):
-    pass
-
-
-def absorb_connection_error(fn):
-    def _wrapper(*args, **kwargs):
-        try:
-            return fn(*args, **kwargs)
-        except redis.exceptions.ConnectionError, ce:
-            raise ConnectionError(str(ce))
-    return _wrapper
-
-
-class BasicModel(object):
-    """
-    All Redis-backed data derives from this class.
-
-    You MUST specify an identifier() property that returns a unique string
-    per instance.
-
-    You MUST have an initializer that takes a single argument that is a value
-    returned by identifier() to load a new class with.
-
-    You may want to specify a dictionary for default_state().
-
-    You may also specify override_type at the class left to use a key other
-    than __class__.__name__.
-
-    You override save and destroy calls to automatically build and destroy
-    associations.
-    """
-
-    override_type = None
-
-    @absorb_connection_error
-    def __init__(self):
-        state = Redis.instance().hgetall(self.__redis_key)
-        if state:
-            self.initial_state = state
-            self.state = dict(self.initial_state)
-        else:
-            self.initial_state = {}
-            self.state = self.default_state()
-
-    def default_state(self):
-        """You probably want to define this in your subclass"""
-        return {}
-
-    @classmethod
-    def _redis_name(cls):
-        return cls.override_type or cls.__name__.lower()
-
-    @classmethod
-    def lookup(cls, identifier):
-        rv = cls(identifier)
-        if rv.is_new_record():
-            return None
-        else:
-            return rv
-
-    @classmethod
-    @absorb_connection_error
-    def all(cls):
-        """yields all objects in the store"""
-        redis_set = cls._redis_set_name(cls.__name__)
-        for identifier in Redis.instance().smembers(redis_set):
-            yield cls(identifier)
-
-    @classmethod
-    def associated_to(cls, foreign_type, foreign_id):
-        for identifier in cls.associated_keys(foreign_type, foreign_id):
-            yield cls(identifier)
-
-    @classmethod
-    @absorb_connection_error
-    def associated_keys(cls, foreign_type, foreign_id):
-        redis_set = cls._redis_association_name(foreign_type, foreign_id)
-        return Redis.instance().smembers(redis_set) or []
-
-    @classmethod
-    def _redis_set_name(cls, kls_name):
-        # stupidly pluralize (for compatiblity with previous codebase)
-        return kls_name.lower() + "s"
-
-    @classmethod
-    def _redis_association_name(cls, foreign_type, foreign_id):
-        return cls._redis_set_name("%s:%s:%s" %
-                                   (foreign_type, foreign_id, cls._redis_name()))
-
-    @property
-    def identifier(self):
-        """You DEFINITELY want to define this in your subclass"""
-        raise NotImplementedError("Your subclass should define identifier")
-
-    @property
-    def __redis_key(self):
-        return '%s:%s' % (self._redis_name(), self.identifier)
-
-    def __repr__(self):
-        return "<%s:%s>" % (self.__class__.__name__, self.identifier)
-
-    def keys(self):
-        return self.state.keys()
-
-    def copy(self):
-        copyDict = {}
-        for item in self.keys():
-            copyDict[item] = self[item]
-        return copyDict
-
-    def get(self, item, default):
-        return self.state.get(item, default)
-
-    def update(self, update_dict):
-        return self.state.update(update_dict)
-
-    def setdefault(self, item, default):
-        return self.state.setdefault(item, default)
-
-    def __contains__(self, item):
-        return item in self.state
-
-    def __getitem__(self, item):
-        return self.state[item]
-
-    def __setitem__(self, item, val):
-        self.state[item] = val
-        return self.state[item]
-
-    def __delitem__(self, item):
-        """We don't support this"""
-        raise Exception("Silly monkey, models NEED all their properties.")
-
-    def is_new_record(self):
-        return self.initial_state == {}
-
-    @absorb_connection_error
-    def add_to_index(self):
-        """Each insance of Foo has its id tracked int the set named Foos"""
-        set_name = self.__class__._redis_set_name(self.__class__.__name__)
-        Redis.instance().sadd(set_name, self.identifier)
-
-    @absorb_connection_error
-    def remove_from_index(self):
-        """Remove id of this instance from the set tracking ids of this type"""
-        set_name = self.__class__._redis_set_name(self.__class__.__name__)
-        Redis.instance().srem(set_name, self.identifier)
-
-    @absorb_connection_error
-    def associate_with(self, foreign_type, foreign_id):
-        """Add this class id into the set foreign_type:foreign_id:this_types"""
-        # note the extra 's' on the end is for plurality
-        # to match the old data without requiring a migration of any sort
-        self.add_associated_model_to_its_set(foreign_type, foreign_id)
-        redis_set = self.__class__._redis_association_name(foreign_type,
-                                                           foreign_id)
-        Redis.instance().sadd(redis_set, self.identifier)
-
-    @absorb_connection_error
-    def unassociate_with(self, foreign_type, foreign_id):
-        """Delete from foreign_type:foreign_id:this_types set"""
-        redis_set = self.__class__._redis_association_name(foreign_type,
-                                                           foreign_id)
-        Redis.instance().srem(redis_set, self.identifier)
-
-    def add_associated_model_to_its_set(self, model_type, model_id):
-        """
-        When associating an X to a Y, save Y for newer timestamp, etc, and to
-        make sure to save it if Y is a new record.
-        If the model_type isn't found as a usable class, ignore it, this can
-        happen when associating to things stored in LDAP (user, project, ...).
-        """
-        table = globals()
-        klsname = model_type.capitalize()
-        if table.has_key(klsname):
-            model_class = table[klsname]
-            model_inst = model_class(model_id)
-            model_inst.save()
-
-    @absorb_connection_error
-    def save(self):
-        """
-        update the directory with the state from this model
-        also add it to the index of items of the same type
-        then set the initial_state = state so new changes are tracked
-        """
-        # TODO(ja): implement hmset in redis-py and use it
-        # instead of multiple calls to hset
-        if self.is_new_record():
-            self["create_time"] = utils.isotime()
-        for key, val in self.state.iteritems():
-            Redis.instance().hset(self.__redis_key, key, val)
-        self.add_to_index()
-        self.initial_state = dict(self.state)
-        return True
-
-    @absorb_connection_error
-    def destroy(self):
-        """deletes all related records from datastore."""
-        logging.info("Destroying datamodel for %s %s",
-                     self.__class__.__name__, self.identifier)
-        Redis.instance().delete(self.__redis_key)
-        self.remove_from_index()
-        return True
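The Redis-backed BasicModel layer removed above is what the rest of this merge replaces with nova.db calls; lookups that used to go through model classes now go through functions such as the following (all taken from other hunks in this commit):

    volumes = db.volume_get_all(context)
    hosts = db.host_get_all()
    instance_ref = db.instance_get_by_str(context, instance_id)
    network_ref = db.network_get_by_bridge(None, interface)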
@@ -22,8 +22,9 @@ Admin API controller, exposed through http via the api worker.
 
 import base64
 
+from nova import db
+from nova import exception
 from nova.auth import manager
-from nova.compute import model
 
 
 def user_dict(user, base64_file=None):
@@ -193,6 +194,8 @@ class AdminController(object):
             raise exception.ApiError('operation must be add or remove')
         return True
 
+    # FIXME(vish): these host commands don't work yet, perhaps some of the
+    #              required data can be retrieved from service objects?
     @admin_only
     def describe_hosts(self, _context, **_kwargs):
         """Returns status info for all nodes. Includes:
@@ -203,9 +206,9 @@ class AdminController(object):
         * DHCP servers running
         * Iptables / bridges
         """
-        return {'hostSet': [host_dict(h) for h in model.Host.all()]}
+        return {'hostSet': [host_dict(h) for h in db.host_get_all()]}
 
     @admin_only
     def describe_host(self, _context, name, **_kwargs):
         """Returns status info for single node."""
-        return host_dict(model.Host.lookup(name))
+        return host_dict(db.host_get(name))
@@ -29,23 +29,19 @@ import time
 
 from twisted.internet import defer
 
-from nova import datastore
+from nova import db
 from nova import exception
 from nova import flags
 from nova import rpc
 from nova import utils
 from nova.auth import rbac
 from nova.auth import manager
-from nova.compute import model
 from nova.compute.instance_types import INSTANCE_TYPES
 from nova.endpoint import images
-from nova.network import service as network_service
-from nova.network import model as network_model
-from nova.volume import service
 
 
 FLAGS = flags.FLAGS
-flags.DEFINE_string('cloud_topic', 'cloud', 'the topic clouds listen on')
+flags.DECLARE('storage_availability_zone', 'nova.volume.manager')
 
 
 def _gen_key(user_id, key_name):
@@ -64,26 +60,16 @@ class CloudController(object):
     sent to the other nodes.
     """
     def __init__(self):
-        self.instdir = model.InstanceDirectory()
+        self.network_manager = utils.import_object(FLAGS.network_manager)
         self.setup()
 
-    @property
-    def instances(self):
-        """ All instances in the system, as dicts """
-        return self.instdir.all
-
-    @property
-    def volumes(self):
-        """ returns a list of all volumes """
-        for volume_id in datastore.Redis.instance().smembers("volumes"):
-            volume = service.get_volume(volume_id)
-            yield volume
-
     def __str__(self):
         return 'CloudController'
 
     def setup(self):
         """ Ensure the keychains and folders exist. """
+        # FIXME(ja): this should be moved to a nova-manage command,
+        # if not setup throw exceptions instead of running
         # Create keys folder, if it doesn't exist
         if not os.path.exists(FLAGS.keys_path):
             os.makedirs(FLAGS.keys_path)
@@ -92,18 +78,15 @@ class CloudController(object):
         if not os.path.exists(root_ca_path):
             start = os.getcwd()
             os.chdir(FLAGS.ca_path)
+            # TODO(vish): Do this with M2Crypto instead
             utils.runthis("Generating root CA: %s", "sh genrootca.sh")
             os.chdir(start)
-            # TODO: Do this with M2Crypto instead
-
-    def get_instance_by_ip(self, ip):
-        return self.instdir.by_ip(ip)
 
     def _get_mpi_data(self, project_id):
         result = {}
-        for instance in self.instdir.all:
-            if instance['project_id'] == project_id:
-                line = '%s slots=%d' % (instance['private_dns_name'],
+        for instance in db.instance_get_by_project(None, project_id):
+            if instance['fixed_ip']:
+                line = '%s slots=%d' % (instance['fixed_ip']['str_id'],
                     INSTANCE_TYPES[instance['instance_type']]['vcpus'])
                 if instance['key_name'] in result:
                     result[instance['key_name']].append(line)
@@ -111,33 +94,30 @@ class CloudController(object):
                     result[instance['key_name']] = [line]
         return result
 
-    def get_metadata(self, ipaddress):
-        i = self.get_instance_by_ip(ipaddress)
-        if i is None:
+    def get_metadata(self, address):
+        instance_ref = db.fixed_ip_get_instance(None, address)
+        if instance_ref is None:
             return None
-        mpi = self._get_mpi_data(i['project_id'])
-        if i['key_name']:
+        mpi = self._get_mpi_data(instance_ref['project_id'])
+        if instance_ref['key_name']:
             keys = {
                 '0': {
-                    '_name': i['key_name'],
-                    'openssh-key': i['key_data']
+                    '_name': instance_ref['key_name'],
+                    'openssh-key': instance_ref['key_data']
                 }
             }
         else:
             keys = ''
-        address_record = network_model.FixedIp(i['private_dns_name'])
-        if address_record:
-            hostname = address_record['hostname']
-        else:
-            hostname = 'ip-%s' % i['private_dns_name'].replace('.', '-')
+        hostname = instance_ref['hostname']
+        floating_ip = db.instance_get_floating_address(None,
+                                                       instance_ref['id'])
         data = {
-            'user-data': base64.b64decode(i['user_data']),
+            'user-data': base64.b64decode(instance_ref['user_data']),
             'meta-data': {
-                'ami-id': i['image_id'],
-                'ami-launch-index': i['ami_launch_index'],
-                'ami-manifest-path': 'FIXME', # image property
-                'block-device-mapping': { # TODO: replace with real data
+                'ami-id': instance_ref['image_id'],
+                'ami-launch-index': instance_ref['launch_index'],
+                'ami-manifest-path': 'FIXME',
+                'block-device-mapping': { # TODO(vish): replace with real data
                     'ami': 'sda1',
                     'ephemeral0': 'sda2',
                     'root': '/dev/sda1',
@@ -145,27 +125,27 @@ class CloudController(object):
                 },
                 'hostname': hostname,
                 'instance-action': 'none',
-                'instance-id': i['instance_id'],
-                'instance-type': i.get('instance_type', ''),
+                'instance-id': instance_ref['str_id'],
+                'instance-type': instance_ref['instance_type'],
                 'local-hostname': hostname,
-                'local-ipv4': i['private_dns_name'], # TODO: switch to IP
-                'kernel-id': i.get('kernel_id', ''),
+                'local-ipv4': address,
+                'kernel-id': instance_ref['kernel_id'],
                 'placement': {
-                    'availaibility-zone': i.get('availability_zone', 'nova'),
+                    'availability-zone': 'nova' # TODO(vish): real zone
                 },
                 'public-hostname': hostname,
-                'public-ipv4': i.get('dns_name', ''), # TODO: switch to IP
+                'public-ipv4': floating_ip or '',
                 'public-keys': keys,
-                'ramdisk-id': i.get('ramdisk_id', ''),
-                'reservation-id': i['reservation_id'],
-                'security-groups': i.get('groups', ''),
+                'ramdisk-id': instance_ref['ramdisk_id'],
+                'reservation-id': instance_ref['reservation_id'],
+                'security-groups': '',
                 'mpi': mpi
             }
         }
-        if False: # TODO: store ancestor ids
+        if False: # TODO(vish): store ancestor ids
             data['ancestor-ami-ids'] = []
-        if i.get('product_codes', None):
-            data['product-codes'] = i['product_codes']
+        if False: # TODO(vish): store product codes
+            data['product-codes'] = []
         return data
 
     @rbac.allow('all')
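The get_metadata() rewrite above changes where each field comes from but keeps the EC2-style layout; the returned structure looks roughly like this (values are invented placeholders, key names come from the hunk):

    {'user-data': '<decoded user data>',
     'meta-data': {'ami-id': 'ami-12345678',
                   'instance-id': 'i-00000001',      # instance_ref['str_id']
                   'local-ipv4': '10.0.0.3',         # the fixed address that was looked up
                   'public-ipv4': '192.0.2.10',      # floating address, or '' if none
                   'hostname': 'host-1',
                   'placement': {'availability-zone': 'nova'},
                   'public-keys': {},                # keypair block when key_name is set
                   'mpi': {}}}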
@@ -252,141 +232,114 @@ class CloudController(object):
|
|||||||
@rbac.allow('projectmanager', 'sysadmin')
|
@rbac.allow('projectmanager', 'sysadmin')
|
||||||
def get_console_output(self, context, instance_id, **kwargs):
|
def get_console_output(self, context, instance_id, **kwargs):
|
||||||
# instance_id is passed in as a list of instances
|
# instance_id is passed in as a list of instances
|
||||||
instance = self._get_instance(context, instance_id[0])
|
instance_ref = db.instance_get_by_str(context, instance_id[0])
|
||||||
return rpc.call('%s.%s' % (FLAGS.compute_topic, instance['node_name']),
|
return rpc.call('%s.%s' % (FLAGS.compute_topic,
|
||||||
|
instance_ref['host']),
|
||||||
{"method": "get_console_output",
|
{"method": "get_console_output",
|
||||||
"args": {"instance_id": instance_id[0]}})
|
"args": {"context": None,
|
||||||
|
"instance_id": instance_ref['id']}})
|
||||||
def _get_user_id(self, context):
|
|
||||||
if context and context.user:
|
|
||||||
return context.user.id
|
|
||||||
else:
|
|
||||||
return None
|
|
||||||
|
|
||||||
@rbac.allow('projectmanager', 'sysadmin')
|
@rbac.allow('projectmanager', 'sysadmin')
|
||||||
def describe_volumes(self, context, **kwargs):
|
def describe_volumes(self, context, **kwargs):
|
||||||
volumes = []
|
if context.user.is_admin():
|
||||||
for volume in self.volumes:
|
volumes = db.volume_get_all(context)
|
||||||
if context.user.is_admin() or volume['project_id'] == context.project.id:
|
else:
|
||||||
v = self.format_volume(context, volume)
|
volumes = db.volume_get_by_project(context, context.project.id)
|
||||||
volumes.append(v)
|
|
||||||
return defer.succeed({'volumeSet': volumes})
|
|
||||||
|
|
||||||
def format_volume(self, context, volume):
|
volumes = [self._format_volume(context, v) for v in volumes]
|
||||||
|
|
||||||
|
return {'volumeSet': volumes}
|
||||||
|
|
||||||
|
def _format_volume(self, context, volume):
|
||||||
v = {}
|
v = {}
|
||||||
v['volumeId'] = volume['volume_id']
|
v['volumeId'] = volume['str_id']
|
||||||
v['status'] = volume['status']
|
v['status'] = volume['status']
|
||||||
v['size'] = volume['size']
|
v['size'] = volume['size']
|
||||||
v['availabilityZone'] = volume['availability_zone']
|
v['availabilityZone'] = volume['availability_zone']
|
||||||
v['createTime'] = volume['create_time']
|
v['createTime'] = volume['created_at']
|
||||||
if context.user.is_admin():
|
if context.user.is_admin():
|
||||||
v['status'] = '%s (%s, %s, %s, %s)' % (
|
v['status'] = '%s (%s, %s, %s, %s)' % (
|
||||||
volume.get('status', None),
|
volume['status'],
|
||||||
volume.get('user_id', None),
|
volume['user_id'],
|
||||||
volume.get('node_name', None),
|
volume['host'],
|
||||||
volume.get('instance_id', ''),
|
volume['instance_id'],
|
||||||
volume.get('mountpoint', ''))
|
volume['mountpoint'])
|
||||||
if volume['attach_status'] == 'attached':
|
if volume['attach_status'] == 'attached':
|
||||||
v['attachmentSet'] = [{'attachTime': volume['attach_time'],
|
v['attachmentSet'] = [{'attachTime': volume['attach_time'],
|
||||||
'deleteOnTermination': volume['delete_on_termination'],
|
'deleteOnTermination': False,
|
||||||
'device': volume['mountpoint'],
|
'device': volume['mountpoint'],
|
||||||
'instanceId': volume['instance_id'],
|
'instanceId': volume['instance_id'],
|
||||||
'status': 'attached',
|
'status': 'attached',
|
||||||
'volume_id': volume['volume_id']}]
|
'volume_id': volume['str_id']}]
|
||||||
else:
|
else:
|
||||||
v['attachmentSet'] = [{}]
|
v['attachmentSet'] = [{}]
|
||||||
return v
|
return v
|
||||||
|
|
||||||
@rbac.allow('projectmanager', 'sysadmin')
|
@rbac.allow('projectmanager', 'sysadmin')
|
||||||
@defer.inlineCallbacks
|
|
||||||
def create_volume(self, context, size, **kwargs):
|
def create_volume(self, context, size, **kwargs):
|
||||||
# TODO(vish): refactor this to create the volume object here and tell service to create it
|
vol = {}
|
||||||
result = yield rpc.call(FLAGS.volume_topic, {"method": "create_volume",
|
vol['size'] = size
|
||||||
"args": {"size": size,
|
vol['user_id'] = context.user.id
|
||||||
"user_id": context.user.id,
|
vol['project_id'] = context.project.id
|
||||||
"project_id": context.project.id}})
|
vol['availability_zone'] = FLAGS.storage_availability_zone
|
||||||
# NOTE(vish): rpc returned value is in the result key in the dictionary
|
vol['status'] = "creating"
|
||||||
volume = self._get_volume(context, result)
|
vol['attach_status'] = "detached"
|
||||||
defer.returnValue({'volumeSet': [self.format_volume(context, volume)]})
|
volume_ref = db.volume_create(context, vol)
|
||||||
|
|
||||||
def _get_address(self, context, public_ip):
|
rpc.cast(FLAGS.volume_topic, {"method": "create_volume",
|
||||||
-        # FIXME(vish) this should move into network.py
-        address = network_model.ElasticIp.lookup(public_ip)
-        if address and (context.user.is_admin() or address['project_id'] == context.project.id):
-            return address
-        raise exception.NotFound("Address at ip %s not found" % public_ip)
+                          "args": {"context": None,
+                                   "volume_id": volume_ref['id']}})

-    def _get_image(self, context, image_id):
-        """passes in context because
-        objectstore does its own authorization"""
-        result = images.list(context, [image_id])
-        if not result:
-            raise exception.NotFound('Image %s could not be found' % image_id)
-        image = result[0]
-        return image
+        return {'volumeSet': [self._format_volume(context, volume_ref)]}

-    def _get_instance(self, context, instance_id):
-        for instance in self.instdir.all:
-            if instance['instance_id'] == instance_id:
-                if context.user.is_admin() or instance['project_id'] == context.project.id:
-                    return instance
-        raise exception.NotFound('Instance %s could not be found' % instance_id)

-    def _get_volume(self, context, volume_id):
-        volume = service.get_volume(volume_id)
-        if context.user.is_admin() or volume['project_id'] == context.project.id:
-            return volume
-        raise exception.NotFound('Volume %s could not be found' % volume_id)

     @rbac.allow('projectmanager', 'sysadmin')
     def attach_volume(self, context, volume_id, instance_id, device, **kwargs):
-        volume = self._get_volume(context, volume_id)
-        if volume['status'] == "attached":
+        volume_ref = db.volume_get_by_str(context, volume_id)
+        # TODO(vish): abstract status checking?
+        if volume_ref['attach_status'] == "attached":
             raise exception.ApiError("Volume is already attached")
-        # TODO(vish): looping through all volumes is slow. We should probably maintain an index
-        for vol in self.volumes:
-            if vol['instance_id'] == instance_id and vol['mountpoint'] == device:
-                raise exception.ApiError("Volume %s is already attached to %s" % (vol['volume_id'], vol['mountpoint']))
-        volume.start_attach(instance_id, device)
-        instance = self._get_instance(context, instance_id)
-        compute_node = instance['node_name']
-        rpc.cast('%s.%s' % (FLAGS.compute_topic, compute_node),
+        instance_ref = db.instance_get_by_str(context, instance_id)
+        host = instance_ref['host']
+        rpc.cast(db.queue_get_for(context, FLAGS.compute_topic, host),
                  {"method": "attach_volume",
-                  "args": {"volume_id": volume_id,
-                           "instance_id": instance_id,
+                  "args": {"context": None,
+                           "volume_id": volume_ref['id'],
+                           "instance_id": instance_ref['id'],
                            "mountpoint": device}})
-        return defer.succeed({'attachTime': volume['attach_time'],
-                              'device': volume['mountpoint'],
-                              'instanceId': instance_id,
+        return defer.succeed({'attachTime': volume_ref['attach_time'],
+                              'device': volume_ref['mountpoint'],
+                              'instanceId': instance_ref['id'],
                               'requestId': context.request_id,
-                              'status': volume['attach_status'],
-                              'volumeId': volume_id})
+                              'status': volume_ref['attach_status'],
+                              'volumeId': volume_ref['id']})

     @rbac.allow('projectmanager', 'sysadmin')
     def detach_volume(self, context, volume_id, **kwargs):
-        volume = self._get_volume(context, volume_id)
-        instance_id = volume.get('instance_id', None)
-        if not instance_id:
+        volume_ref = db.volume_get_by_str(context, volume_id)
+        instance_ref = db.volume_get_instance(context, volume_ref['id'])
+        if not instance_ref:
             raise exception.Error("Volume isn't attached to anything!")
-        if volume['status'] == "available":
+        # TODO(vish): abstract status checking?
+        if volume_ref['status'] == "available":
             raise exception.Error("Volume is already detached")
         try:
-            volume.start_detach()
-            instance = self._get_instance(context, instance_id)
-            rpc.cast('%s.%s' % (FLAGS.compute_topic, instance['node_name']),
+            host = instance_ref['host']
+            rpc.cast(db.queue_get_for(context, FLAGS.compute_topic, host),
                      {"method": "detach_volume",
-                      "args": {"instance_id": instance_id,
-                               "volume_id": volume_id}})
+                      "args": {"context": None,
+                               "instance_id": instance_ref['id'],
+                               "volume_id": volume_ref['id']}})
         except exception.NotFound:
             # If the instance doesn't exist anymore,
             # then we need to call detach blind
-            volume.finish_detach()
-        return defer.succeed({'attachTime': volume['attach_time'],
-                              'device': volume['mountpoint'],
-                              'instanceId': instance_id,
+            db.volume_detached(context)
+        return defer.succeed({'attachTime': volume_ref['attach_time'],
+                              'device': volume_ref['mountpoint'],
+                              'instanceId': instance_ref['str_id'],
                               'requestId': context.request_id,
-                              'status': volume['attach_status'],
-                              'volumeId': volume_id})
+                              'status': volume_ref['attach_status'],
+                              'volumeId': volume_ref['id']})

     def _convert_to_set(self, lst, label):
         if lst == None or lst == []:
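Editor's note on the hunk above: the hand-built '%s.%s' % (topic, host) queue names are replaced throughout by db.queue_get_for(). A minimal sketch of the idea, assuming queue_get_for simply joins a topic and a host so each worker gets its own queue (the real helper lives in the new db layer, which is not part of this diff):

    COMPUTE_TOPIC = 'compute'  # stand-in for FLAGS.compute_topic

    def queue_get_for(context, topic, host):
        """Hypothetical equivalent of db.queue_get_for: one queue per host."""
        return '%s.%s' % (topic, host)

    # casting an attach_volume message to the host that owns the instance
    target = queue_get_for(None, COMPUTE_TOPIC, 'node-01')
    assert target == 'compute.node-01'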
@@ -409,50 +362,53 @@ class CloudController(object):

     def _format_instances(self, context, reservation_id=None):
         reservations = {}
-        if context.user.is_admin():
-            instgenerator = self.instdir.all
+        if reservation_id:
+            instances = db.instance_get_by_reservation(context,
+                                                       reservation_id)
         else:
-            instgenerator = self.instdir.by_project(context.project.id)
-        for instance in instgenerator:
-            res_id = instance.get('reservation_id', 'Unknown')
-            if reservation_id != None and reservation_id != res_id:
-                continue
+            if not context.user.is_admin():
+                instances = db.instance_get_all(context)
+            else:
+                instances = db.instance_get_by_project(context,
+                                                       context.project.id)
+        for instance in instances:
             if not context.user.is_admin():
                 if instance['image_id'] == FLAGS.vpn_image_id:
                     continue
             i = {}
-            i['instance_id'] = instance.get('instance_id', None)
-            i['image_id'] = instance.get('image_id', None)
-            i['instance_state'] = {
-                'code': instance.get('state', 0),
-                'name': instance.get('state_description', 'pending')
+            i['instanceId'] = instance['str_id']
+            i['imageId'] = instance['image_id']
+            i['instanceState'] = {
+                'code': instance['state'],
+                'name': instance['state_description']
             }
-            i['public_dns_name'] = network_model.get_public_ip_for_instance(
-                i['instance_id'])
-            i['private_dns_name'] = instance.get('private_dns_name', None)
-            if not i['public_dns_name']:
-                i['public_dns_name'] = i['private_dns_name']
-            i['dns_name'] = instance.get('dns_name', None)
-            i['key_name'] = instance.get('key_name', None)
+            fixed_addr = None
+            floating_addr = None
+            if instance['fixed_ip']:
+                fixed_addr = instance['fixed_ip']['str_id']
+                if instance['fixed_ip']['floating_ips']:
+                    fixed = instance['fixed_ip']
+                    floating_addr = fixed['floating_ips'][0]['str_id']
+            i['privateDnsName'] = fixed_addr
+            i['publicDnsName'] = floating_addr
+            i['dnsName'] = i['publicDnsName'] or i['privateDnsName']
+            i['keyName'] = instance['key_name']
             if context.user.is_admin():
-                i['key_name'] = '%s (%s, %s)' % (i['key_name'],
-                    instance.get('project_id', None),
-                    instance.get('node_name', ''))
-            i['product_codes_set'] = self._convert_to_set(
-                instance.get('product_codes', None), 'product_code')
-            i['instance_type'] = instance.get('instance_type', None)
-            i['launch_time'] = instance.get('launch_time', None)
-            i['ami_launch_index'] = instance.get('ami_launch_index',
-                                                 None)
-            if not reservations.has_key(res_id):
+                i['keyName'] = '%s (%s, %s)' % (i['keyName'],
+                    instance['project_id'],
+                    instance['host'])
+            i['productCodesSet'] = self._convert_to_set([], 'product_codes')
+            i['instanceType'] = instance['instance_type']
+            i['launchTime'] = instance['created_at']
+            i['amiLaunchIndex'] = instance['launch_index']
+            if not reservations.has_key(instance['reservation_id']):
                 r = {}
-                r['reservation_id'] = res_id
-                r['owner_id'] = instance.get('project_id', None)
-                r['group_set'] = self._convert_to_set(
-                    instance.get('groups', None), 'group_id')
-                r['instances_set'] = []
-                reservations[res_id] = r
-            reservations[res_id]['instances_set'].append(i)
+                r['reservationId'] = instance['reservation_id']
+                r['ownerId'] = instance['project_id']
+                r['groupSet'] = self._convert_to_set([], 'groups')
+                r['instancesSet'] = []
+                reservations[instance['reservation_id']] = r
+            reservations[instance['reservation_id']]['instancesSet'].append(i)

         return list(reservations.values())

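Editor's note: the formatter above groups instances under their reservation. A condensed sketch of just that grouping step, assuming only the handful of fields shown in the hunk (the real method fills in many more):

    def group_by_reservation(instances):
        """Sketch of _format_instances' grouping: one entry per reservation_id."""
        reservations = {}
        for instance in instances:
            res_id = instance['reservation_id']
            if res_id not in reservations:
                reservations[res_id] = {'reservationId': res_id,
                                        'ownerId': instance['project_id'],
                                        'instancesSet': []}
            reservations[res_id]['instancesSet'].append(
                {'instanceId': instance['str_id']})
        return list(reservations.values())

    print(group_by_reservation([{'reservation_id': 'r-1',
                                 'project_id': 'fake',
                                 'str_id': 'i-1'}]))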
@@ -462,20 +418,23 @@ class CloudController(object):

     def format_addresses(self, context):
         addresses = []
-        for address in network_model.ElasticIp.all():
-            # TODO(vish): implement a by_project iterator for addresses
-            if (context.user.is_admin() or
-                address['project_id'] == context.project.id):
-                address_rv = {
-                    'public_ip': address['address'],
-                    'instance_id': address.get('instance_id', 'free')
-                }
         if context.user.is_admin():
-            address_rv['instance_id'] = "%s (%s, %s)" % (
-                address['instance_id'],
-                address['user_id'],
-                address['project_id'],
-            )
+            iterator = db.floating_ip_get_all(context)
+        else:
+            iterator = db.floating_ip_get_by_project(context,
+                                                     context.project.id)
+        for floating_ip_ref in iterator:
+            address = floating_ip_ref['str_id']
+            instance_id = None
+            if (floating_ip_ref['fixed_ip']
+                    and floating_ip_ref['fixed_ip']['instance']):
+                instance_id = floating_ip_ref['fixed_ip']['instance']['str_id']
+            address_rv = {'public_ip': address,
+                          'instance_id': instance_id}
+            if context.user.is_admin():
+                details = "%s (%s)" % (address_rv['instance_id'],
+                                       floating_ip_ref['project_id'])
+                address_rv['instance_id'] = details
             addresses.append(address_rv)
         return {'addressesSet': addresses}

@@ -484,8 +443,8 @@ class CloudController(object):
     def allocate_address(self, context, **kwargs):
         network_topic = yield self._get_network_topic(context)
         public_ip = yield rpc.call(network_topic,
-                                   {"method": "allocate_elastic_ip",
-                                    "args": {"user_id": context.user.id,
+                                   {"method": "allocate_floating_ip",
+                                    "args": {"context": None,
                                              "project_id": context.project.id}})
         defer.returnValue({'addressSet': [{'publicIp': public_ip}]})

@@ -493,56 +452,62 @@ class CloudController(object):
     @defer.inlineCallbacks
     def release_address(self, context, public_ip, **kwargs):
         # NOTE(vish): Should we make sure this works?
+        floating_ip_ref = db.floating_ip_get_by_address(context, public_ip)
         network_topic = yield self._get_network_topic(context)
         rpc.cast(network_topic,
-                 {"method": "deallocate_elastic_ip",
-                  "args": {"elastic_ip": public_ip}})
+                 {"method": "deallocate_floating_ip",
+                  "args": {"context": None,
+                           "floating_address": floating_ip_ref['str_id']}})
         defer.returnValue({'releaseResponse': ["Address released."]})

     @rbac.allow('netadmin')
     @defer.inlineCallbacks
     def associate_address(self, context, instance_id, public_ip, **kwargs):
-        instance = self._get_instance(context, instance_id)
-        address = self._get_address(context, public_ip)
+        instance_ref = db.instance_get_by_str(context, instance_id)
+        fixed_ip_ref = db.fixed_ip_get_by_instance(context, instance_ref['id'])
+        floating_ip_ref = db.floating_ip_get_by_address(context, public_ip)
         network_topic = yield self._get_network_topic(context)
         rpc.cast(network_topic,
-                 {"method": "associate_elastic_ip",
-                  "args": {"elastic_ip": address['address'],
-                           "fixed_ip": instance['private_dns_name'],
-                           "instance_id": instance['instance_id']}})
+                 {"method": "associate_floating_ip",
+                  "args": {"context": None,
+                           "floating_address": floating_ip_ref['str_id'],
+                           "fixed_address": fixed_ip_ref['str_id']}})
         defer.returnValue({'associateResponse': ["Address associated."]})

     @rbac.allow('netadmin')
     @defer.inlineCallbacks
     def disassociate_address(self, context, public_ip, **kwargs):
-        address = self._get_address(context, public_ip)
+        floating_ip_ref = db.floating_ip_get_by_address(context, public_ip)
         network_topic = yield self._get_network_topic(context)
         rpc.cast(network_topic,
-                 {"method": "disassociate_elastic_ip",
-                  "args": {"elastic_ip": address['address']}})
+                 {"method": "disassociate_floating_ip",
+                  "args": {"context": None,
+                           "floating_address": floating_ip_ref['str_id']}})
         defer.returnValue({'disassociateResponse': ["Address disassociated."]})

     @defer.inlineCallbacks
     def _get_network_topic(self, context):
         """Retrieves the network host for a project"""
-        host = network_service.get_host_for_project(context.project.id)
+        network_ref = db.project_get_network(context, context.project.id)
+        host = network_ref['host']
         if not host:
             host = yield rpc.call(FLAGS.network_topic,
                                   {"method": "set_network_host",
-                                   "args": {"user_id": context.user.id,
+                                   "args": {"context": None,
                                             "project_id": context.project.id}})
-        defer.returnValue('%s.%s' %(FLAGS.network_topic, host))
+        defer.returnValue(db.queue_get_for(context, FLAGS.network_topic, host))

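Editor's note: the new _get_network_topic does lazy host assignment: it uses the network's recorded host if one exists, and only on the first call for a project does it block on an rpc.call asking the network service to claim the network. A self-contained restatement of that flow, with db, rpc and FLAGS passed in explicitly so the sketch stands alone (these are the same objects used in the hunk above, not new API):

    from twisted.internet import defer

    @defer.inlineCallbacks
    def get_network_topic_sketch(context, db, rpc, FLAGS):
        """Return the per-host network queue, claiming a host if needed."""
        network_ref = db.project_get_network(context, context.project.id)
        host = network_ref['host']
        if not host:
            # first caller for this project triggers the claim
            host = yield rpc.call(FLAGS.network_topic,
                                  {"method": "set_network_host",
                                   "args": {"context": None,
                                            "project_id": context.project.id}})
        defer.returnValue(db.queue_get_for(context, FLAGS.network_topic, host))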
     @rbac.allow('projectmanager', 'sysadmin')
     @defer.inlineCallbacks
     def run_instances(self, context, **kwargs):
         # make sure user can access the image
         # vpn image is private so it doesn't show up on lists
-        if kwargs['image_id'] != FLAGS.vpn_image_id:
-            image = self._get_image(context, kwargs['image_id'])
+        vpn = kwargs['image_id'] == FLAGS.vpn_image_id

-        # FIXME(ja): if image is cloudpipe, this breaks
+        if not vpn:
+            image = images.get(context, kwargs['image_id'])

+        # FIXME(ja): if image is vpn, this breaks
         # get defaults from imagestore
         image_id = image['imageId']
         kernel_id = image.get('kernelId', FLAGS.default_kernel)
@@ -553,11 +518,10 @@ class CloudController(object):
         ramdisk_id = kwargs.get('ramdisk_id', ramdisk_id)

         # make sure we have access to kernel and ramdisk
-        self._get_image(context, kernel_id)
-        self._get_image(context, ramdisk_id)
+        images.get(context, kernel_id)
+        images.get(context, ramdisk_id)

         logging.debug("Going to run instances...")
-        reservation_id = utils.generate_uid('r')
         launch_time = time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime())
         key_data = None
         if kwargs.has_key('key_name'):
@@ -566,107 +530,122 @@ class CloudController(object):
                 raise exception.ApiError('Key Pair %s not found' %
                                          kwargs['key_name'])
             key_data = key_pair.public_key
-        network_topic = yield self._get_network_topic(context)
         # TODO: Get the real security group of launch in here
         security_group = "default"
-        for num in range(int(kwargs['max_count'])):
-            is_vpn = False
-            if image_id == FLAGS.vpn_image_id:
-                is_vpn = True
-            inst = self.instdir.new()
-            allocate_data = yield rpc.call(network_topic,
-                                           {"method": "allocate_fixed_ip",
-                                            "args": {"user_id": context.user.id,
-                                                     "project_id": context.project.id,
-                                                     "security_group": security_group,
-                                                     "is_vpn": is_vpn,
-                                                     "hostname": inst.instance_id}})
-            inst['image_id'] = image_id
-            inst['kernel_id'] = kernel_id
-            inst['ramdisk_id'] = ramdisk_id
-            inst['user_data'] = kwargs.get('user_data', '')
-            inst['instance_type'] = kwargs.get('instance_type', 'm1.small')
-            inst['reservation_id'] = reservation_id
-            inst['launch_time'] = launch_time
-            inst['key_data'] = key_data or ''
-            inst['key_name'] = kwargs.get('key_name', '')
-            inst['user_id'] = context.user.id
-            inst['project_id'] = context.project.id
-            inst['ami_launch_index'] = num
-            inst['security_group'] = security_group
-            inst['hostname'] = inst.instance_id
-            for (key, value) in allocate_data.iteritems():
-                inst[key] = value

-            inst.save()
+        reservation_id = utils.generate_uid('r')
+        base_options = {}
+        base_options['image_id'] = image_id
+        base_options['kernel_id'] = kernel_id
+        base_options['ramdisk_id'] = ramdisk_id
+        base_options['reservation_id'] = reservation_id
+        base_options['key_data'] = key_data
+        base_options['key_name'] = kwargs.get('key_name', None)
+        base_options['user_id'] = context.user.id
+        base_options['project_id'] = context.project.id
+        base_options['user_data'] = kwargs.get('user_data', '')
+        base_options['instance_type'] = kwargs.get('instance_type', 'm1.small')
+        base_options['security_group'] = security_group
+
+        for num in range(int(kwargs['max_count'])):
+            instance_ref = db.instance_create(context, base_options)
+            inst_id = instance_ref['id']
+
+            inst = {}
+            inst['mac_address'] = utils.generate_mac()
+            inst['launch_index'] = num
+            inst['hostname'] = instance_ref['str_id']
+            db.instance_update(context, inst_id, inst)
+            address = self.network_manager.allocate_fixed_ip(context,
+                                                             inst_id,
+                                                             vpn)
+
+            # TODO(vish): This probably should be done in the scheduler
+            #             network is setup when host is assigned
+            network_topic = yield self._get_network_topic(context)
+            rpc.call(network_topic,
+                     {"method": "setup_fixed_ip",
+                      "args": {"context": None,
+                               "address": address}})
+
             rpc.cast(FLAGS.compute_topic,
                      {"method": "run_instance",
-                      "args": {"instance_id": inst.instance_id}})
-            logging.debug("Casting to node for %s's instance with IP of %s" %
-                          (context.user.name, inst['private_dns_name']))
-        # TODO: Make Network figure out the network name from ip.
-        defer.returnValue(self._format_run_instances(context, reservation_id))
+                      "args": {"context": None,
+                               "instance_id": inst_id}})
+            logging.debug("Casting to node for %s/%s's instance %s" %
+                          (context.project.name, context.user.name, inst_id))
+        defer.returnValue(self._format_run_instances(context,
+                                                     reservation_id))


     @rbac.allow('projectmanager', 'sysadmin')
     @defer.inlineCallbacks
     def terminate_instances(self, context, instance_id, **kwargs):
         logging.debug("Going to start terminating instances")
-        network_topic = yield self._get_network_topic(context)
-        for i in instance_id:
-            logging.debug("Going to try and terminate %s" % i)
+        for id_str in instance_id:
+            logging.debug("Going to try and terminate %s" % id_str)
             try:
-                instance = self._get_instance(context, i)
+                instance_ref = db.instance_get_by_str(context, id_str)
             except exception.NotFound:
                 logging.warning("Instance %s was not found during terminate"
-                                % i)
+                                % id_str)
                 continue
-            elastic_ip = network_model.get_public_ip_for_instance(i)
-            if elastic_ip:
-                logging.debug("Disassociating address %s" % elastic_ip)
+            # FIXME(ja): where should network deallocate occur?
+            address = db.instance_get_floating_address(context,
+                                                       instance_ref['id'])
+            if address:
+                logging.debug("Disassociating address %s" % address)
                 # NOTE(vish): Right now we don't really care if the ip is
                 #             disassociated.  We may need to worry about
                 #             checking this later.  Perhaps in the scheduler?
+                network_topic = yield self._get_network_topic(context)
                 rpc.cast(network_topic,
-                         {"method": "disassociate_elastic_ip",
-                          "args": {"elastic_ip": elastic_ip}})
+                         {"method": "disassociate_floating_ip",
+                          "args": {"context": None,
+                                   "address": address}})

-            fixed_ip = instance.get('private_dns_name', None)
-            if fixed_ip:
-                logging.debug("Deallocating address %s" % fixed_ip)
-                # NOTE(vish): Right now we don't really care if the ip is
-                #             actually removed.  We may need to worry about
-                #             checking this later.  Perhaps in the scheduler?
-                rpc.cast(network_topic,
-                         {"method": "deallocate_fixed_ip",
-                          "args": {"fixed_ip": fixed_ip}})
+            address = db.instance_get_fixed_address(context,
+                                                    instance_ref['id'])
+            if address:
+                logging.debug("Deallocating address %s" % address)
+                # NOTE(vish): Currently, nothing needs to be done on the
+                #             network node until release. If this changes,
+                #             we will need to cast here.
+                self.network.deallocate_fixed_ip(context, address)

-            if instance.get('node_name', 'unassigned') != 'unassigned':
-                # NOTE(joshua?): It's also internal default
-                rpc.cast('%s.%s' % (FLAGS.compute_topic, instance['node_name']),
+            host = instance_ref['host']
+            if host:
+                rpc.cast(db.queue_get_for(context, FLAGS.compute_topic, host),
                          {"method": "terminate_instance",
-                          "args": {"instance_id": i}})
+                          "args": {"context": None,
+                                   "instance_id": instance_ref['id']}})
             else:
-                instance.destroy()
+                db.instance_destroy(context, instance_ref['id'])
         defer.returnValue(True)

     @rbac.allow('projectmanager', 'sysadmin')
     def reboot_instances(self, context, instance_id, **kwargs):
         """instance_id is a list of instance ids"""
-        for i in instance_id:
-            instance = self._get_instance(context, i)
-            rpc.cast('%s.%s' % (FLAGS.compute_topic, instance['node_name']),
+        for id_str in instance_id:
+            instance_ref = db.instance_get_by_str(context, id_str)
+            host = instance_ref['host']
+            rpc.cast(db.queue_get_for(context, FLAGS.compute_topic, host),
                      {"method": "reboot_instance",
-                      "args": {"instance_id": i}})
+                      "args": {"context": None,
+                               "instance_id": instance_ref['id']}})
         return defer.succeed(True)

     @rbac.allow('projectmanager', 'sysadmin')
     def delete_volume(self, context, volume_id, **kwargs):
         # TODO: return error if not authorized
-        volume = self._get_volume(context, volume_id)
-        volume_node = volume['node_name']
-        rpc.cast('%s.%s' % (FLAGS.volume_topic, volume_node),
+        volume_ref = db.volume_get_by_str(context, volume_id)
+        host = volume_ref['host']
+        rpc.cast(db.queue_get_for(context, FLAGS.volume_topic, host),
                  {"method": "delete_volume",
-                  "args": {"volume_id": volume_id}})
+                  "args": {"context": None,
+                           "volume_id": volume_ref['id']}})
         return defer.succeed(True)

     @rbac.allow('all')
@@ -717,23 +696,3 @@ class CloudController(object):
             raise exception.ApiError('operation_type must be add or remove')
         result = images.modify(context, image_id, operation_type)
         return defer.succeed(result)
-
-    def update_state(self, topic, value):
-        """ accepts status reports from the queue and consolidates them """
-        # TODO(jmc): if an instance has disappeared from
-        # the node, call instance_death
-        if topic == "instances":
-            return defer.succeed(True)
-        aggregate_state = getattr(self, topic)
-        node_name = value.keys()[0]
-        items = value[node_name]
-
-        logging.debug("Updating %s state for %s" % (topic, node_name))
-
-        for item_id in items.keys():
-            if (aggregate_state.has_key('pending') and
-                aggregate_state['pending'].has_key(item_id)):
-                del aggregate_state['pending'][item_id]
-        aggregate_state[node_name] = items
-
-        return defer.succeed(True)
@@ -18,7 +18,7 @@

 """
 Proxy AMI-related calls from the cloud controller, to the running
-objectstore daemon.
+objectstore service.
 """

 import json
@@ -26,6 +26,7 @@ import urllib

 import boto.s3.connection

+from nova import exception
 from nova import flags
 from nova import utils
 from nova.auth import manager
@@ -55,7 +56,6 @@ def register(context, image_location):

     return image_id

-
 def list(context, filter_list=[]):
     """ return a list of all images that a user can see
@@ -71,6 +71,14 @@ def list(context, filter_list=[]):
         return [i for i in result if i['imageId'] in filter_list]
     return result

+def get(context, image_id):
+    """return a image object if the context has permissions"""
+    result = list(context, [image_id])
+    if not result:
+        raise exception.NotFound('Image %s could not be found' % image_id)
+    image = result[0]
+    return image
+

 def deregister(context, image_id):
     """ unregister an image """
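Editor's note: the new get() helper moves the list-then-check pattern out of cloud.py and into the image proxy module. A hedged usage sketch, assuming the file patched above is the nova.endpoint images module (the file name itself is not shown in this view) and that the helper name describe_image_or_none is purely illustrative:

    from nova import exception
    from nova.endpoint import images

    def describe_image_or_none(context, image_id):
        """Return the imageId if visible to this context, else None."""
        try:
            image = images.get(context, image_id)
        except exception.NotFound:
            return None
        return image['imageId']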
@@ -22,6 +22,7 @@ where they're used.
 """

 import getopt
+import os
 import socket
 import sys

@@ -74,7 +75,7 @@ class FlagValues(gflags.FlagValues):
         unparsed_args = sneaky_unparsed_args['value']
         if unparsed_args:
             if self.IsGnuGetOpt():
-                args = argv[:1] + unparsed
+                args = argv[:1] + unparsed_args
             else:
                 args = argv[:1] + original_argv[-len(unparsed_args):]
         else:
@@ -141,6 +142,7 @@ def _wrapper(func):
     return _wrapped


+DEFINE = _wrapper(gflags.DEFINE)
 DEFINE_string = _wrapper(gflags.DEFINE_string)
 DEFINE_integer = _wrapper(gflags.DEFINE_integer)
 DEFINE_bool = _wrapper(gflags.DEFINE_bool)
@@ -168,7 +170,6 @@ def DECLARE(name, module_string, flag_values=FLAGS):
 DEFINE_string('connection_type', 'libvirt', 'libvirt, xenapi or fake')
 DEFINE_integer('s3_port', 3333, 's3 port')
 DEFINE_string('s3_host', '127.0.0.1', 's3 host')
-#DEFINE_string('cloud_topic', 'cloud', 'the topic clouds listen on')
 DEFINE_string('compute_topic', 'compute', 'the topic compute nodes listen on')
 DEFINE_string('volume_topic', 'volume', 'the topic volume nodes listen on')
 DEFINE_string('network_topic', 'network', 'the topic network nodes listen on')
@@ -202,9 +203,20 @@ DEFINE_string('vpn_key_suffix',

 DEFINE_integer('auth_token_ttl', 3600, 'Seconds for auth tokens to linger')

+DEFINE_string('sql_connection',
+              'sqlite:///%s/nova.sqlite' % os.path.abspath("./"),
+              'connection string for sql database')
+
+DEFINE_string('compute_manager', 'nova.compute.manager.ComputeManager',
+              'Manager for compute')
+DEFINE_string('network_manager', 'nova.network.manager.VlanManager',
+              'Manager for network')
+DEFINE_string('volume_manager', 'nova.volume.manager.AOEManager',
+              'Manager for volume')
+
+DEFINE_string('host', socket.gethostname(),
+              'name of this node')
+
 # UNUSED
 DEFINE_string('node_availability_zone', 'nova',
               'availability zone of this node')
-DEFINE_string('node_name', socket.gethostname(),
-              'name of this node')

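Editor's note: the new *_manager flags hold dotted class paths rather than objects. Elsewhere in this same diff the compute unit test consumes them with utils.import_object(FLAGS.compute_manager), so a minimal sketch of the intended pattern is:

    from nova import flags
    from nova import utils

    FLAGS = flags.FLAGS

    def load_compute_manager():
        """Instantiate whichever manager class the flag points at."""
        return utils.import_object(FLAGS.compute_manager)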
@@ -18,9 +18,10 @@
 # under the License.

 """
-Process pool, still buggy right now.
+Process pool using twisted threading
 """

+import logging
 import StringIO

 from twisted.internet import defer
@@ -127,6 +128,7 @@ def get_process_output(executable, args=None, env=None, path=None,
     cmd = executable
     if args:
         cmd = " ".join([cmd] + args)
+    logging.debug("Running cmd: %s", cmd)
     process_handler = BackRelayWithInput(
         deferred,
         cmd,
@@ -44,6 +44,8 @@ flags.DEFINE_bool('use_syslog', True, 'output to syslog when daemonizing')
 flags.DEFINE_string('logfile', None, 'log file to output to')
 flags.DEFINE_string('pidfile', None, 'pid file to output to')
 flags.DEFINE_string('working_directory', './', 'working directory...')
+flags.DEFINE_integer('uid', os.getuid(), 'uid under which to run')
+flags.DEFINE_integer('gid', os.getgid(), 'gid under which to run')


 def stop(pidfile):
@@ -135,6 +137,8 @@ def daemonize(args, name, main):
                            threaded=False),
                        stdin=stdin,
                        stdout=stdout,
-                       stderr=stderr
+                       stderr=stderr,
+                       uid=FLAGS.uid,
+                       gid=FLAGS.gid
                        ):
         main(args)
@@ -33,8 +33,6 @@ class Context(object):
 class AccessTestCase(test.BaseTestCase):
     def setUp(self):
         super(AccessTestCase, self).setUp()
-        FLAGS.connection_type = 'fake'
-        FLAGS.fake_storage = True
         um = manager.AuthManager()
         # Make test users
         try:
@@ -16,6 +16,8 @@
 # License for the specific language governing permissions and limitations
 # under the License.

+"""Unit tests for the API endpoint"""
+
 import boto
 from boto.ec2 import regioninfo
 import httplib
@@ -38,7 +40,15 @@ FLAGS = flags.FLAGS
 # circuit boto calls and feed them into our tornado handlers,
 # it's pretty damn circuitous so apologies if you have to fix
 # a bug in it
-def boto_to_tornado(method, path, headers, data, host, connection=None):
+# NOTE(jaypipes) The pylint disables here are for R0913 (too many args) which
+#                isn't controllable since boto's HTTPRequest needs that many
+#                args, and for the version-differentiated import of tornado's
+#                httputil.
+# NOTE(jaypipes): The disable-msg=E1101 and E1103 below is because pylint is
+#                 unable to introspect the deferred's return value properly
+
+def boto_to_tornado(method, path, headers, data,  # pylint: disable-msg=R0913
+                    host, connection=None):
     """ translate boto requests into tornado requests

     connection should be a FakeTornadoHttpConnection instance
@@ -46,7 +56,7 @@ def boto_to_tornado(method, path, headers, data, host, connection=None):
     try:
         headers = httpserver.HTTPHeaders()
     except AttributeError:
-        from tornado import httputil
+        from tornado import httputil  # pylint: disable-msg=E0611
         headers = httputil.HTTPHeaders()
     for k, v in headers.iteritems():
         headers[k] = v
@@ -61,9 +71,9 @@ def boto_to_tornado(method, path, headers, data, host, connection=None):
     return req


-def raw_to_httpresponse(s):
+def raw_to_httpresponse(response_string):
     """translate a raw tornado http response into an httplib.HTTPResponse"""
-    sock = FakeHttplibSocket(s)
+    sock = FakeHttplibSocket(response_string)
     resp = httplib.HTTPResponse(sock)
     resp.begin()
     return resp
@@ -71,47 +81,54 @@ def raw_to_httpresponse(s):

 class FakeHttplibSocket(object):
     """a fake socket implementation for httplib.HTTPResponse, trivial"""
-    def __init__(self, s):
-        self.fp = StringIO.StringIO(s)
+    def __init__(self, response_string):
+        self._buffer = StringIO.StringIO(response_string)

-    def makefile(self, mode, other):
-        return self.fp
+    def makefile(self, _mode, _other):
+        """Returns the socket's internal buffer"""
+        return self._buffer


 class FakeTornadoStream(object):
     """a fake stream to satisfy tornado's assumptions, trivial"""
-    def set_close_callback(self, f):
+    def set_close_callback(self, _func):
+        """Dummy callback for stream"""
         pass


 class FakeTornadoConnection(object):
-    """ a fake connection object for tornado to pass to its handlers
+    """A fake connection object for tornado to pass to its handlers

     web requests are expected to write to this as they get data and call
     finish when they are done with the request, we buffer the writes and
     kick off a callback when it is done so that we can feed the result back
     into boto.
     """
-    def __init__(self, d):
-        self.d = d
+    def __init__(self, deferred):
+        self._deferred = deferred
         self._buffer = StringIO.StringIO()

     def write(self, chunk):
+        """Writes a chunk of data to the internal buffer"""
         self._buffer.write(chunk)

     def finish(self):
-        s = self._buffer.getvalue()
-        self.d.callback(s)
+        """Finalizes the connection and returns the buffered data via the
+        deferred callback.
+        """
+        data = self._buffer.getvalue()
+        self._deferred.callback(data)

     xheaders = None

     @property
-    def stream(self):
+    def stream(self):  # pylint: disable-msg=R0201
+        """Required property for interfacing with tornado"""
         return FakeTornadoStream()


 class FakeHttplibConnection(object):
-    """ a fake httplib.HTTPConnection for boto to use
+    """A fake httplib.HTTPConnection for boto to use

     requests made via this connection actually get translated and routed into
     our tornado app, we then wait for the response and turn it back into
@@ -123,7 +140,9 @@ class FakeHttplibConnection(object):
         self.deferred = defer.Deferred()

     def request(self, method, path, data, headers):
-        req = boto_to_tornado
+        """Creates a connection to a fake tornado and sets
+        up a deferred request with the supplied data and
+        headers"""
         conn = FakeTornadoConnection(self.deferred)
         request = boto_to_tornado(connection=conn,
                                   method=method,
@@ -131,12 +150,16 @@ class FakeHttplibConnection(object):
                                   headers=headers,
                                   data=data,
                                   host=self.host)
-        handler = self.app(request)
+        self.app(request)
         self.deferred.addCallback(raw_to_httpresponse)

     def getresponse(self):
+        """A bit of deferred magic for catching the response
+        from the previously deferred request"""
         @defer.inlineCallbacks
         def _waiter():
+            """Callback that simply yields the deferred's
+            return value."""
             result = yield self.deferred
             defer.returnValue(result)
         d = _waiter()
@@ -144,14 +167,16 @@ class FakeHttplibConnection(object):
         # this deferred has already been called by the time
         # we get here, we are going to cheat and return
         # the result of the callback
-        return d.result
+        return d.result  # pylint: disable-msg=E1101

     def close(self):
+        """Required for compatibility with boto/tornado"""
         pass


 class ApiEc2TestCase(test.BaseTestCase):
-    def setUp(self):
+    """Unit test for the cloud controller on an EC2 API"""
+    def setUp(self):  # pylint: disable-msg=C0103,C0111
         super(ApiEc2TestCase, self).setUp()

         self.manager = manager.AuthManager()
@@ -171,12 +196,16 @@ class ApiEc2TestCase(test.BaseTestCase):
         self.mox.StubOutWithMock(self.ec2, 'new_http_connection')

     def expect_http(self, host=None, is_secure=False):
+        """Returns a new EC2 connection"""
         http = FakeHttplibConnection(
             self.app, '%s:%d' % (self.host, FLAGS.cc_port), False)
+        # pylint: disable-msg=E1103
         self.ec2.new_http_connection(host, is_secure).AndReturn(http)
         return http

     def test_describe_instances(self):
+        """Test that, after creating a user and a project, the describe
+        instances call to the API works properly"""
         self.expect_http()
         self.mox.ReplayAll()
         user = self.manager.create_user('fake', 'fake', 'fake')
@@ -187,14 +216,18 @@ class ApiEc2TestCase(test.BaseTestCase):


     def test_get_all_key_pairs(self):
+        """Test that, after creating a user and project and generating
+        a key pair, that the API call to list key pairs works properly"""
         self.expect_http()
         self.mox.ReplayAll()
-        keyname = "".join(random.choice("sdiuisudfsdcnpaqwertasd") for x in range(random.randint(4, 8)))
+        keyname = "".join(random.choice("sdiuisudfsdcnpaqwertasd") \
+                          for x in range(random.randint(4, 8)))
         user = self.manager.create_user('fake', 'fake', 'fake')
         project = self.manager.create_project('fake', 'fake', 'fake')
         self.manager.generate_key_pair(user.id, keyname)

         rv = self.ec2.get_all_key_pairs()
-        self.assertTrue(filter(lambda k: k.name == keyname, rv))
+        results = [k for k in rv if k.name == keyname]
+        self.assertEquals(len(results), 1)
         self.manager.delete_project(project)
         self.manager.delete_user(user)
@@ -32,11 +32,9 @@ FLAGS = flags.FLAGS


 class AuthTestCase(test.BaseTestCase):
-    flush_db = False
     def setUp(self):
         super(AuthTestCase, self).setUp()
-        self.flags(connection_type='fake',
-                   fake_storage=True)
+        self.flags(connection_type='fake')
         self.manager = manager.AuthManager()

     def test_001_can_create_users(self):
@@ -27,8 +27,9 @@ from xml.etree import ElementTree
 from nova import flags
 from nova import rpc
 from nova import test
+from nova import utils
 from nova.auth import manager
-from nova.compute import service
+from nova.compute import power_state
 from nova.endpoint import api
 from nova.endpoint import cloud

@@ -39,21 +40,16 @@ FLAGS = flags.FLAGS
 class CloudTestCase(test.BaseTestCase):
     def setUp(self):
         super(CloudTestCase, self).setUp()
-        self.flags(connection_type='fake',
-                   fake_storage=True)
+        self.flags(connection_type='fake')

         self.conn = rpc.Connection.instance()
         logging.getLogger().setLevel(logging.DEBUG)

         # set up our cloud
         self.cloud = cloud.CloudController()
-        self.cloud_consumer = rpc.AdapterConsumer(connection=self.conn,
-                                                  topic=FLAGS.cloud_topic,
-                                                  proxy=self.cloud)
-        self.injected.append(self.cloud_consumer.attach_to_tornado(self.ioloop))

         # set up a service
-        self.compute = service.ComputeService()
+        self.compute = utils.import_class(FLAGS.compute_manager)
         self.compute_consumer = rpc.AdapterConsumer(connection=self.conn,
                                                     topic=FLAGS.compute_topic,
                                                     proxy=self.compute)
@@ -99,7 +95,7 @@ class CloudTestCase(test.BaseTestCase):
             rv = yield defer.succeed(time.sleep(1))
             info = self.cloud._get_instance(instance['instance_id'])
             logging.debug(info['state'])
-            if info['state'] == node.Instance.RUNNING:
+            if info['state'] == power_state.RUNNING:
                 break
         self.assert_(rv)

@@ -15,113 +15,115 @@
|
|||||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||||
# License for the specific language governing permissions and limitations
|
# License for the specific language governing permissions and limitations
|
||||||
# under the License.
|
# under the License.
|
||||||
|
"""
|
||||||
|
Tests For Compute
|
||||||
|
"""
|
||||||
|
|
||||||
|
import datetime
|
||||||
import logging
|
import logging
|
||||||
import time
|
|
||||||
from twisted.internet import defer
|
|
||||||
from xml.etree import ElementTree
|
|
||||||
|
|
||||||
|
from twisted.internet import defer
|
||||||
|
|
||||||
|
from nova import db
|
||||||
from nova import exception
|
from nova import exception
|
||||||
from nova import flags
|
from nova import flags
|
||||||
from nova import test
|
from nova import test
|
||||||
from nova import utils
|
from nova import utils
|
||||||
from nova.compute import model
|
from nova.auth import manager
|
||||||
from nova.compute import service
|
|
||||||
|
|
||||||
|
|
||||||
FLAGS = flags.FLAGS
|
FLAGS = flags.FLAGS
|
||||||
|
|
||||||
|
|
||||||
class InstanceXmlTestCase(test.TrialTestCase):
|
class ComputeTestCase(test.TrialTestCase):
|
||||||
# @defer.inlineCallbacks
|
"""Test case for compute"""
|
||||||
def test_serialization(self):
|
def setUp(self): # pylint: disable-msg=C0103
|
||||||
# TODO: Reimplement this, it doesn't make sense in redis-land
|
|
||||||
return
|
|
||||||
|
|
||||||
# instance_id = 'foo'
|
|
||||||
# first_node = node.Node()
|
|
||||||
# inst = yield first_node.run_instance(instance_id)
|
|
||||||
#
|
|
||||||
# # force the state so that we can verify that it changes
|
|
||||||
# inst._s['state'] = node.Instance.NOSTATE
|
|
||||||
# xml = inst.toXml()
|
|
||||||
# self.assert_(ElementTree.parse(StringIO.StringIO(xml)))
|
|
||||||
#
|
|
||||||
# second_node = node.Node()
|
|
||||||
# new_inst = node.Instance.fromXml(second_node._conn, pool=second_node._pool, xml=xml)
|
|
||||||
# self.assertEqual(new_inst.state, node.Instance.RUNNING)
|
|
||||||
# rv = yield first_node.terminate_instance(instance_id)
|
|
||||||
|
|
||||||
|
|
||||||
class ComputeConnectionTestCase(test.TrialTestCase):
|
|
||||||
def setUp(self):
|
|
||||||
logging.getLogger().setLevel(logging.DEBUG)
|
logging.getLogger().setLevel(logging.DEBUG)
|
||||||
super(ComputeConnectionTestCase, self).setUp()
|
super(ComputeTestCase, self).setUp()
|
||||||
self.flags(connection_type='fake',
|
self.flags(connection_type='fake')
|
||||||
fake_storage=True)
|
self.compute = utils.import_object(FLAGS.compute_manager)
|
||||||
self.compute = service.ComputeService()
|
self.manager = manager.AuthManager()
|
||||||
|
self.user = self.manager.create_user('fake', 'fake', 'fake')
|
||||||
|
self.project = self.manager.create_project('fake', 'fake', 'fake')
|
||||||
|
self.context = None
|
||||||
|
|
||||||
def create_instance(self):
|
def tearDown(self): # pylint: disable-msg=C0103
|
||||||
instdir = model.InstanceDirectory()
|
self.manager.delete_user(self.user)
|
||||||
inst = instdir.new()
|
self.manager.delete_project(self.project)
|
||||||
# TODO(ja): add ami, ari, aki, user_data
|
|
||||||
|
def _create_instance(self):
|
||||||
|
"""Create a test instance"""
|
||||||
|
inst = {}
|
||||||
|
inst['image_id'] = 'ami-test'
|
||||||
inst['reservation_id'] = 'r-fakeres'
|
inst['reservation_id'] = 'r-fakeres'
|
||||||
inst['launch_time'] = '10'
|
inst['launch_time'] = '10'
|
||||||
inst['user_id'] = 'fake'
|
inst['user_id'] = self.user.id
|
||||||
inst['project_id'] = 'fake'
|
inst['project_id'] = self.project.id
|
||||||
inst['instance_type'] = 'm1.tiny'
|
inst['instance_type'] = 'm1.tiny'
|
||||||
inst['node_name'] = FLAGS.node_name
|
|
||||||
inst['mac_address'] = utils.generate_mac()
|
inst['mac_address'] = utils.generate_mac()
|
||||||
inst['ami_launch_index'] = 0
|
inst['ami_launch_index'] = 0
|
||||||
inst.save()
|
return db.instance_create(self.context, inst)['id']
|
||||||
return inst['instance_id']
|
|
||||||
|
|
||||||
@defer.inlineCallbacks
|
@defer.inlineCallbacks
|
||||||
def test_run_describe_terminate(self):
|
def test_run_terminate(self):
|
||||||
instance_id = self.create_instance()
|
"""Make sure it is possible to run and terminate instance"""
|
||||||
|
instance_id = self._create_instance()
|
||||||
|
|
||||||
rv = yield self.compute.run_instance(instance_id)
|
yield self.compute.run_instance(self.context, instance_id)
|
||||||
|
|
||||||
rv = yield self.compute.describe_instances()
|
instances = db.instance_get_all(None)
|
||||||
logging.info("Running instances: %s", rv)
|
logging.info("Running instances: %s", instances)
|
||||||
self.assertEqual(rv[instance_id].name, instance_id)
|
self.assertEqual(len(instances), 1)
|
||||||
|
|
||||||
rv = yield self.compute.terminate_instance(instance_id)
|
yield self.compute.terminate_instance(self.context, instance_id)
|
||||||
|
|
||||||
rv = yield self.compute.describe_instances()
|
instances = db.instance_get_all(None)
|
||||||
logging.info("After terminating instances: %s", rv)
|
logging.info("After terminating instances: %s", instances)
|
||||||
self.assertEqual(rv, {})
|
self.assertEqual(len(instances), 0)
|
||||||
|
|
||||||
|
@defer.inlineCallbacks
|
+    def test_run_terminate_timestamps(self):
+        """Make sure it is possible to run and terminate instance"""
+        instance_id = self._create_instance()
+        instance_ref = db.instance_get(self.context, instance_id)
+        self.assertEqual(instance_ref['launched_at'], None)
+        self.assertEqual(instance_ref['terminated_at'], None)
+        launch = datetime.datetime.utcnow()
+        yield self.compute.run_instance(self.context, instance_id)
+        instance_ref = db.instance_get(self.context, instance_id)
+        self.assert_(instance_ref['launched_at'] > launch)
+        self.assertEqual(instance_ref['terminated_at'], None)
+        terminate = datetime.datetime.utcnow()
+        yield self.compute.terminate_instance(self.context, instance_id)
+        instance_ref = db.instance_get({'deleted': True}, instance_id)
+        self.assert_(instance_ref['launched_at'] < terminate)
+        self.assert_(instance_ref['terminated_at'] > terminate)
+
     @defer.inlineCallbacks
     def test_reboot(self):
-        instance_id = self.create_instance()
-        rv = yield self.compute.run_instance(instance_id)
-
-        rv = yield self.compute.describe_instances()
-        self.assertEqual(rv[instance_id].name, instance_id)
-
-        yield self.compute.reboot_instance(instance_id)
-
-        rv = yield self.compute.describe_instances()
-        self.assertEqual(rv[instance_id].name, instance_id)
-        rv = yield self.compute.terminate_instance(instance_id)
+        """Ensure instance can be rebooted"""
+        instance_id = self._create_instance()
+        yield self.compute.run_instance(self.context, instance_id)
+        yield self.compute.reboot_instance(self.context, instance_id)
+        yield self.compute.terminate_instance(self.context, instance_id)

     @defer.inlineCallbacks
     def test_console_output(self):
-        instance_id = self.create_instance()
-        rv = yield self.compute.run_instance(instance_id)
-
-        console = yield self.compute.get_console_output(instance_id)
+        """Make sure we can get console output from instance"""
+        instance_id = self._create_instance()
+        yield self.compute.run_instance(self.context, instance_id)
+        console = yield self.compute.get_console_output(self.context,
+                                                         instance_id)
         self.assert_(console)
-        rv = yield self.compute.terminate_instance(instance_id)
+        yield self.compute.terminate_instance(self.context, instance_id)

     @defer.inlineCallbacks
     def test_run_instance_existing(self):
-        instance_id = self.create_instance()
-        rv = yield self.compute.run_instance(instance_id)
-
-        rv = yield self.compute.describe_instances()
-        self.assertEqual(rv[instance_id].name, instance_id)
-
-        self.assertRaises(exception.Error, self.compute.run_instance, instance_id)
-        rv = yield self.compute.terminate_instance(instance_id)
+        """Ensure failure when running an instance that already exists"""
+        instance_id = self._create_instance()
+        yield self.compute.run_instance(self.context, instance_id)
+        self.assertFailure(self.compute.run_instance(self.context,
+                                                     instance_id),
+                           exception.Error)
+        yield self.compute.terminate_instance(self.context, instance_id)
@@ -20,9 +20,20 @@ from nova import flags
 FLAGS = flags.FLAGS

+flags.DECLARE('volume_driver', 'nova.volume.manager')
+FLAGS.volume_driver = 'nova.volume.driver.FakeAOEDriver'
 FLAGS.connection_type = 'fake'
-FLAGS.fake_storage = True
 FLAGS.fake_rabbit = True
-FLAGS.fake_network = True
 FLAGS.auth_driver = 'nova.auth.ldapdriver.FakeLdapDriver'
+flags.DECLARE('network_size', 'nova.network.manager')
+flags.DECLARE('num_networks', 'nova.network.manager')
+flags.DECLARE('fake_network', 'nova.network.manager')
+FLAGS.network_size = 16
+FLAGS.num_networks = 5
+FLAGS.fake_network = True
+flags.DECLARE('num_shelves', 'nova.volume.manager')
+flags.DECLARE('blades_per_shelf', 'nova.volume.manager')
+FLAGS.num_shelves = 2
+FLAGS.blades_per_shelf = 4
 FLAGS.verbose = True
+FLAGS.sql_connection = 'sqlite:///nova.sqlite'
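Note on the fake-flags hunk above: `flags.DECLARE` does not define a new flag, it only makes a flag that is defined in another module visible to this file so it can be overridden before the tests import that module. A minimal sketch of the pattern, assuming gflags-style semantics and reusing only names that appear in the hunk:

    from nova import flags

    FLAGS = flags.FLAGS

    # Pull in the flag defined in nova.volume.manager, then override it for
    # the test run before any test code reads it.
    flags.DECLARE('num_shelves', 'nova.volume.manager')
    FLAGS.num_shelves = 2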
@@ -1,292 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright 2010 United States Government as represented by the
-# Administrator of the National Aeronautics and Space Administration.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from datetime import datetime, timedelta
-import logging
-import time
-
-from nova import flags
-from nova import test
-from nova import utils
-from nova.compute import model
-
-
-FLAGS = flags.FLAGS
-
-
-class ModelTestCase(test.TrialTestCase):
-    def setUp(self):
-        super(ModelTestCase, self).setUp()
-        self.flags(connection_type='fake',
-                   fake_storage=True)
-
-    def tearDown(self):
-        model.Instance('i-test').destroy()
-        model.Host('testhost').destroy()
-        model.Daemon('testhost', 'nova-testdaemon').destroy()
-
-    def create_instance(self):
-        inst = model.Instance('i-test')
-        inst['reservation_id'] = 'r-test'
-        inst['launch_time'] = '10'
-        inst['user_id'] = 'fake'
-        inst['project_id'] = 'fake'
-        inst['instance_type'] = 'm1.tiny'
-        inst['mac_address'] = utils.generate_mac()
-        inst['ami_launch_index'] = 0
-        inst['private_dns_name'] = '10.0.0.1'
-        inst.save()
-        return inst
-
-    def create_host(self):
-        host = model.Host('testhost')
-        host.save()
-        return host
-
-    def create_daemon(self):
-        daemon = model.Daemon('testhost', 'nova-testdaemon')
-        daemon.save()
-        return daemon
-
-    def create_session_token(self):
-        session_token = model.SessionToken('tk12341234')
-        session_token['user'] = 'testuser'
-        session_token.save()
-        return session_token
-
-    def test_create_instance(self):
-        """store with create_instace, then test that a load finds it"""
-        instance = self.create_instance()
-        old = model.Instance(instance.identifier)
-        self.assertFalse(old.is_new_record())
-
-    def test_delete_instance(self):
-        """create, then destroy, then make sure loads a new record"""
-        instance = self.create_instance()
-        instance.destroy()
-        newinst = model.Instance('i-test')
-        self.assertTrue(newinst.is_new_record())
-
-    def test_instance_added_to_set(self):
-        """create, then check that it is listed in global set"""
-        instance = self.create_instance()
-        found = False
-        for x in model.InstanceDirectory().all:
-            if x.identifier == 'i-test':
-                found = True
-        self.assert_(found)
-
-    def test_instance_associates_project(self):
-        """create, then check that it is listed for the project"""
-        instance = self.create_instance()
-        found = False
-        for x in model.InstanceDirectory().by_project(instance.project):
-            if x.identifier == 'i-test':
-                found = True
-        self.assert_(found)
-
-    def test_instance_associates_ip(self):
-        """create, then check that it is listed for the ip"""
-        instance = self.create_instance()
-        found = False
-        x = model.InstanceDirectory().by_ip(instance['private_dns_name'])
-        self.assertEqual(x.identifier, 'i-test')
-
-    def test_instance_associates_node(self):
-        """create, then check that it is listed for the node_name"""
-        instance = self.create_instance()
-        found = False
-        for x in model.InstanceDirectory().by_node(FLAGS.node_name):
-            if x.identifier == 'i-test':
-                found = True
-        self.assertFalse(found)
-        instance['node_name'] = 'test_node'
-        instance.save()
-        for x in model.InstanceDirectory().by_node('test_node'):
-            if x.identifier == 'i-test':
-                found = True
-        self.assert_(found)
-
-
-    def test_host_class_finds_hosts(self):
-        host = self.create_host()
-        self.assertEqual('testhost', model.Host.lookup('testhost').identifier)
-
-    def test_host_class_doesnt_find_missing_hosts(self):
-        rv = model.Host.lookup('woahnelly')
-        self.assertEqual(None, rv)
-
-    def test_create_host(self):
-        """store with create_host, then test that a load finds it"""
-        host = self.create_host()
-        old = model.Host(host.identifier)
-        self.assertFalse(old.is_new_record())
-
-    def test_delete_host(self):
-        """create, then destroy, then make sure loads a new record"""
-        instance = self.create_host()
-        instance.destroy()
-        newinst = model.Host('testhost')
-        self.assertTrue(newinst.is_new_record())
-
-    def test_host_added_to_set(self):
-        """create, then check that it is included in list"""
-        instance = self.create_host()
-        found = False
-        for x in model.Host.all():
-            if x.identifier == 'testhost':
-                found = True
-        self.assert_(found)
-
-    def test_create_daemon_two_args(self):
-        """create a daemon with two arguments"""
-        d = self.create_daemon()
-        d = model.Daemon('testhost', 'nova-testdaemon')
-        self.assertFalse(d.is_new_record())
-
-    def test_create_daemon_single_arg(self):
-        """Create a daemon using the combined host:bin format"""
-        d = model.Daemon("testhost:nova-testdaemon")
-        d.save()
-        d = model.Daemon('testhost:nova-testdaemon')
-        self.assertFalse(d.is_new_record())
-
-    def test_equality_of_daemon_single_and_double_args(self):
-        """Create a daemon using the combined host:bin arg, find with 2"""
-        d = model.Daemon("testhost:nova-testdaemon")
-        d.save()
-        d = model.Daemon('testhost', 'nova-testdaemon')
-        self.assertFalse(d.is_new_record())
-
-    def test_equality_daemon_of_double_and_single_args(self):
-        """Create a daemon using the combined host:bin arg, find with 2"""
-        d = self.create_daemon()
-        d = model.Daemon('testhost:nova-testdaemon')
-        self.assertFalse(d.is_new_record())
-
-    def test_delete_daemon(self):
-        """create, then destroy, then make sure loads a new record"""
-        instance = self.create_daemon()
-        instance.destroy()
-        newinst = model.Daemon('testhost', 'nova-testdaemon')
-        self.assertTrue(newinst.is_new_record())
-
-    def test_daemon_heartbeat(self):
-        """Create a daemon, sleep, heartbeat, check for update"""
-        d = self.create_daemon()
-        ts = d['updated_at']
-        time.sleep(2)
-        d.heartbeat()
-        d2 = model.Daemon('testhost', 'nova-testdaemon')
-        ts2 = d2['updated_at']
-        self.assert_(ts2 > ts)
-
-    def test_daemon_added_to_set(self):
-        """create, then check that it is included in list"""
-        instance = self.create_daemon()
-        found = False
-        for x in model.Daemon.all():
-            if x.identifier == 'testhost:nova-testdaemon':
-                found = True
-        self.assert_(found)
-
-    def test_daemon_associates_host(self):
-        """create, then check that it is listed for the host"""
-        instance = self.create_daemon()
-        found = False
-        for x in model.Daemon.by_host('testhost'):
-            if x.identifier == 'testhost:nova-testdaemon':
-                found = True
-        self.assertTrue(found)
-
-    def test_create_session_token(self):
-        """create"""
-        d = self.create_session_token()
-        d = model.SessionToken(d.token)
-        self.assertFalse(d.is_new_record())
-
-    def test_delete_session_token(self):
-        """create, then destroy, then make sure loads a new record"""
-        instance = self.create_session_token()
-        instance.destroy()
-        newinst = model.SessionToken(instance.token)
-        self.assertTrue(newinst.is_new_record())
-
-    def test_session_token_added_to_set(self):
-        """create, then check that it is included in list"""
-        instance = self.create_session_token()
-        found = False
-        for x in model.SessionToken.all():
-            if x.identifier == instance.token:
-                found = True
-        self.assert_(found)
-
-    def test_session_token_associates_user(self):
-        """create, then check that it is listed for the user"""
-        instance = self.create_session_token()
-        found = False
-        for x in model.SessionToken.associated_to('user', 'testuser'):
-            if x.identifier == instance.identifier:
-                found = True
-        self.assertTrue(found)
-
-    def test_session_token_generation(self):
-        instance = model.SessionToken.generate('username', 'TokenType')
-        self.assertFalse(instance.is_new_record())
-
-    def test_find_generated_session_token(self):
-        instance = model.SessionToken.generate('username', 'TokenType')
-        found = model.SessionToken.lookup(instance.identifier)
-        self.assert_(found)
-
-    def test_update_session_token_expiry(self):
-        instance = model.SessionToken('tk12341234')
-        oldtime = datetime.utcnow()
-        instance['expiry'] = oldtime.strftime(utils.TIME_FORMAT)
-        instance.update_expiry()
-        expiry = utils.parse_isotime(instance['expiry'])
-        self.assert_(expiry > datetime.utcnow())
-
-    def test_session_token_lookup_when_expired(self):
-        instance = model.SessionToken.generate("testuser")
-        instance['expiry'] = datetime.utcnow().strftime(utils.TIME_FORMAT)
-        instance.save()
-        inst = model.SessionToken.lookup(instance.identifier)
-        self.assertFalse(inst)
-
-    def test_session_token_lookup_when_not_expired(self):
-        instance = model.SessionToken.generate("testuser")
-        inst = model.SessionToken.lookup(instance.identifier)
-        self.assert_(inst)
-
-    def test_session_token_is_expired_when_expired(self):
-        instance = model.SessionToken.generate("testuser")
-        instance['expiry'] = datetime.utcnow().strftime(utils.TIME_FORMAT)
-        self.assert_(instance.is_expired())
-
-    def test_session_token_is_expired_when_not_expired(self):
-        instance = model.SessionToken.generate("testuser")
-        self.assertFalse(instance.is_expired())
-
-    def test_session_token_ttl(self):
-        instance = model.SessionToken.generate("testuser")
-        now = datetime.utcnow()
-        delta = timedelta(hours=1)
-        instance['expiry'] = (now + delta).strftime(utils.TIME_FORMAT)
-        # give 5 seconds of fuzziness
-        self.assert_(abs(instance.ttl() - FLAGS.auth_token_ttl) < 5)
@@ -22,14 +22,13 @@ import IPy
 import os
 import logging

+from nova import db
+from nova import exception
 from nova import flags
 from nova import test
 from nova import utils
 from nova.auth import manager
-from nova.network import model
-from nova.network import service
-from nova.network import vpn
-from nova.network.exception import NoMoreAddresses
+from nova.endpoint import api

 FLAGS = flags.FLAGS
@@ -41,169 +40,180 @@ class NetworkTestCase(test.TrialTestCase):
         # NOTE(vish): if you change these flags, make sure to change the
         # flags in the corresponding section in nova-dhcpbridge
         self.flags(connection_type='fake',
-                   fake_storage=True,
                    fake_network=True,
                    auth_driver='nova.auth.ldapdriver.FakeLdapDriver',
-                   network_size=32)
+                   network_size=16,
+                   num_networks=5)
         logging.getLogger().setLevel(logging.DEBUG)
         self.manager = manager.AuthManager()
         self.user = self.manager.create_user('netuser', 'netuser', 'netuser')
         self.projects = []
-        self.projects.append(self.manager.create_project('netuser',
-                                                          'netuser',
-                                                          'netuser'))
-        for i in range(0, 6):
+        self.network = utils.import_object(FLAGS.network_manager)
+        self.context = api.APIRequestContext(None, project=None, user=self.user)
+        for i in range(5):
             name = 'project%s' % i
             self.projects.append(self.manager.create_project(name,
                                                              'netuser',
                                                              name))
-            vpn.NetworkData.create(self.projects[i].id)
-        self.service = service.VlanNetworkService()
+            # create the necessary network data for the project
+            self.network.set_network_host(self.context, self.projects[i].id)
+        instance_ref = db.instance_create(None,
+                                          {'mac_address': utils.generate_mac()})
+        self.instance_id = instance_ref['id']
+        instance_ref = db.instance_create(None,
+                                          {'mac_address': utils.generate_mac()})
+        self.instance2_id = instance_ref['id']

     def tearDown(self):  # pylint: disable-msg=C0103
         super(NetworkTestCase, self).tearDown()
+        # TODO(termie): this should really be instantiating clean datastores
+        #               in between runs, one failure kills all the tests
+        db.instance_destroy(None, self.instance_id)
+        db.instance_destroy(None, self.instance2_id)
         for project in self.projects:
             self.manager.delete_project(project)
         self.manager.delete_user(self.user)

-    def test_public_network_allocation(self):
+    def _create_address(self, project_num, instance_id=None):
+        """Create an address in given project num"""
+        if instance_id is None:
+            instance_id = self.instance_id
+        self.context.project = self.projects[project_num]
+        return self.network.allocate_fixed_ip(self.context, instance_id)
+
+    def test_public_network_association(self):
         """Makes sure that we can allocaate a public ip"""
+        # TODO(vish): better way of adding floating ips
         pubnet = IPy.IP(flags.FLAGS.public_range)
-        address = self.service.allocate_elastic_ip(self.user.id,
-                                                   self.projects[0].id)
-        self.assertTrue(IPy.IP(address) in pubnet)
+        address = str(pubnet[0])
+        try:
+            db.floating_ip_get_by_address(None, address)
+        except exception.NotFound:
+            db.floating_ip_create(None, {'address': address,
+                                         'host': FLAGS.host})
+        float_addr = self.network.allocate_floating_ip(self.context,
+                                                       self.projects[0].id)
+        fix_addr = self._create_address(0)
+        self.assertEqual(float_addr, str(pubnet[0]))
+        self.network.associate_floating_ip(self.context, float_addr, fix_addr)
+        address = db.instance_get_floating_address(None, self.instance_id)
+        self.assertEqual(address, float_addr)
+        self.network.disassociate_floating_ip(self.context, float_addr)
+        address = db.instance_get_floating_address(None, self.instance_id)
+        self.assertEqual(address, None)
+        self.network.deallocate_floating_ip(self.context, float_addr)
+        self.network.deallocate_fixed_ip(self.context, fix_addr)

     def test_allocate_deallocate_fixed_ip(self):
         """Makes sure that we can allocate and deallocate a fixed ip"""
-        result = self.service.allocate_fixed_ip(
-                self.user.id, self.projects[0].id)
-        address = result['private_dns_name']
-        mac = result['mac_address']
-        net = model.get_project_network(self.projects[0].id, "default")
-        self.assertEqual(True, is_in_project(address, self.projects[0].id))
-        hostname = "test-host"
-        issue_ip(mac, address, hostname, net.bridge_name)
-        self.service.deallocate_fixed_ip(address)
+        address = self._create_address(0)
+        self.assertTrue(is_allocated_in_project(address, self.projects[0].id))
+        lease_ip(address)
+        self.network.deallocate_fixed_ip(self.context, address)

         # Doesn't go away until it's dhcp released
-        self.assertEqual(True, is_in_project(address, self.projects[0].id))
+        self.assertTrue(is_allocated_in_project(address, self.projects[0].id))

-        release_ip(mac, address, hostname, net.bridge_name)
-        self.assertEqual(False, is_in_project(address, self.projects[0].id))
+        release_ip(address)
+        self.assertFalse(is_allocated_in_project(address, self.projects[0].id))

     def test_side_effects(self):
         """Ensures allocating and releasing has no side effects"""
-        hostname = "side-effect-host"
-        result = self.service.allocate_fixed_ip(self.user.id,
-                                                self.projects[0].id)
-        mac = result['mac_address']
-        address = result['private_dns_name']
-        result = self.service.allocate_fixed_ip(self.user,
-                                                self.projects[1].id)
-        secondmac = result['mac_address']
-        secondaddress = result['private_dns_name']
-
-        net = model.get_project_network(self.projects[0].id, "default")
-        secondnet = model.get_project_network(self.projects[1].id, "default")
-
-        self.assertEqual(True, is_in_project(address, self.projects[0].id))
-        self.assertEqual(True, is_in_project(secondaddress,
-                                             self.projects[1].id))
-        self.assertEqual(False, is_in_project(address, self.projects[1].id))
+        address = self._create_address(0)
+        address2 = self._create_address(1, self.instance2_id)
+
+        self.assertTrue(is_allocated_in_project(address, self.projects[0].id))
+        self.assertTrue(is_allocated_in_project(address2, self.projects[1].id))
+        self.assertFalse(is_allocated_in_project(address, self.projects[1].id))

         # Addresses are allocated before they're issued
-        issue_ip(mac, address, hostname, net.bridge_name)
-        issue_ip(secondmac, secondaddress, hostname, secondnet.bridge_name)
+        lease_ip(address)
+        lease_ip(address2)

-        self.service.deallocate_fixed_ip(address)
-        release_ip(mac, address, hostname, net.bridge_name)
-        self.assertEqual(False, is_in_project(address, self.projects[0].id))
+        self.network.deallocate_fixed_ip(self.context, address)
+        release_ip(address)
+        self.assertFalse(is_allocated_in_project(address, self.projects[0].id))

         # First address release shouldn't affect the second
-        self.assertEqual(True, is_in_project(secondaddress,
-                                             self.projects[1].id))
+        self.assertTrue(is_allocated_in_project(address2, self.projects[1].id))

-        self.service.deallocate_fixed_ip(secondaddress)
-        release_ip(secondmac, secondaddress, hostname, secondnet.bridge_name)
-        self.assertEqual(False, is_in_project(secondaddress,
+        self.network.deallocate_fixed_ip(self.context, address2)
+        release_ip(address2)
+        self.assertFalse(is_allocated_in_project(address2,
                                                  self.projects[1].id))

     def test_subnet_edge(self):
         """Makes sure that private ips don't overlap"""
-        result = self.service.allocate_fixed_ip(self.user.id,
-                                                self.projects[0].id)
-        firstaddress = result['private_dns_name']
-        hostname = "toomany-hosts"
+        first = self._create_address(0)
+        lease_ip(first)
+        instance_ids = []
         for i in range(1, 5):
-            project_id = self.projects[i].id
-            result = self.service.allocate_fixed_ip(
-                    self.user, project_id)
-            mac = result['mac_address']
-            address = result['private_dns_name']
-            result = self.service.allocate_fixed_ip(
-                    self.user, project_id)
-            mac2 = result['mac_address']
-            address2 = result['private_dns_name']
-            result = self.service.allocate_fixed_ip(
-                    self.user, project_id)
-            mac3 = result['mac_address']
-            address3 = result['private_dns_name']
-            net = model.get_project_network(project_id, "default")
-            issue_ip(mac, address, hostname, net.bridge_name)
-            issue_ip(mac2, address2, hostname, net.bridge_name)
-            issue_ip(mac3, address3, hostname, net.bridge_name)
-            self.assertEqual(False, is_in_project(address,
-                                                  self.projects[0].id))
-            self.assertEqual(False, is_in_project(address2,
-                                                  self.projects[0].id))
-            self.assertEqual(False, is_in_project(address3,
-                                                  self.projects[0].id))
-            self.service.deallocate_fixed_ip(address)
-            self.service.deallocate_fixed_ip(address2)
-            self.service.deallocate_fixed_ip(address3)
-            release_ip(mac, address, hostname, net.bridge_name)
-            release_ip(mac2, address2, hostname, net.bridge_name)
-            release_ip(mac3, address3, hostname, net.bridge_name)
-        net = model.get_project_network(self.projects[0].id, "default")
-        self.service.deallocate_fixed_ip(firstaddress)
+            mac = utils.generate_mac()
+            instance_ref = db.instance_create(None,
+                                              {'mac_address': mac})
+            instance_ids.append(instance_ref['id'])
+            address = self._create_address(i, instance_ref['id'])
+            mac = utils.generate_mac()
+            instance_ref = db.instance_create(None,
+                                              {'mac_address': mac})
+            instance_ids.append(instance_ref['id'])
+            address2 = self._create_address(i, instance_ref['id'])
+            mac = utils.generate_mac()
+            instance_ref = db.instance_create(None,
+                                              {'mac_address': mac})
+            instance_ids.append(instance_ref['id'])
+            address3 = self._create_address(i, instance_ref['id'])
+            lease_ip(address)
+            lease_ip(address2)
+            lease_ip(address3)
+            self.assertFalse(is_allocated_in_project(address,
+                                                     self.projects[0].id))
+            self.assertFalse(is_allocated_in_project(address2,
+                                                     self.projects[0].id))
+            self.assertFalse(is_allocated_in_project(address3,
+                                                     self.projects[0].id))
+            self.network.deallocate_fixed_ip(self.context, address)
+            self.network.deallocate_fixed_ip(self.context, address2)
+            self.network.deallocate_fixed_ip(self.context, address3)
+            release_ip(address)
+            release_ip(address2)
+            release_ip(address3)
+        for instance_id in instance_ids:
+            db.instance_destroy(None, instance_id)
+        release_ip(first)
+        self.network.deallocate_fixed_ip(self.context, first)

     def test_vpn_ip_and_port_looks_valid(self):
         """Ensure the vpn ip and port are reasonable"""
         self.assert_(self.projects[0].vpn_ip)
-        self.assert_(self.projects[0].vpn_port >= FLAGS.vpn_start_port)
-        self.assert_(self.projects[0].vpn_port <= FLAGS.vpn_end_port)
+        self.assert_(self.projects[0].vpn_port >= FLAGS.vpn_start)
+        self.assert_(self.projects[0].vpn_port <= FLAGS.vpn_start +
+                                                  FLAGS.num_networks)

-    def test_too_many_vpns(self):
-        """Ensure error is raised if we run out of vpn ports"""
-        vpns = []
-        for i in xrange(vpn.NetworkData.num_ports_for_ip(FLAGS.vpn_ip)):
-            vpns.append(vpn.NetworkData.create("vpnuser%s" % i))
-        self.assertRaises(vpn.NoMorePorts, vpn.NetworkData.create, "boom")
-        for network_datum in vpns:
-            network_datum.destroy()
+    def test_too_many_networks(self):
+        """Ensure error is raised if we run out of networks"""
+        projects = []
+        networks_left = FLAGS.num_networks - db.network_count(None)
+        for i in range(networks_left):
+            project = self.manager.create_project('many%s' % i, self.user)
+            projects.append(project)
+        self.assertRaises(db.NoMoreNetworks,
+                          self.manager.create_project,
+                          'boom',
+                          self.user)
+        for project in projects:
+            self.manager.delete_project(project)

     def test_ips_are_reused(self):
         """Makes sure that ip addresses that are deallocated get reused"""
-        result = self.service.allocate_fixed_ip(
-                self.user.id, self.projects[0].id)
-        mac = result['mac_address']
-        address = result['private_dns_name']
+        address = self._create_address(0)
+        lease_ip(address)
+        self.network.deallocate_fixed_ip(self.context, address)
+        release_ip(address)

-        hostname = "reuse-host"
-        net = model.get_project_network(self.projects[0].id, "default")
-
-        issue_ip(mac, address, hostname, net.bridge_name)
-        self.service.deallocate_fixed_ip(address)
-        release_ip(mac, address, hostname, net.bridge_name)
-
-        result = self.service.allocate_fixed_ip(
-                self.user, self.projects[0].id)
-        secondmac = result['mac_address']
-        secondaddress = result['private_dns_name']
-        self.assertEqual(address, secondaddress)
-        issue_ip(secondmac, secondaddress, hostname, net.bridge_name)
-        self.service.deallocate_fixed_ip(secondaddress)
-        release_ip(secondmac, secondaddress, hostname, net.bridge_name)
+        address2 = self._create_address(0)
+        self.assertEqual(address, address2)
+        self.network.deallocate_fixed_ip(self.context, address2)

     def test_available_ips(self):
         """Make sure the number of available ips for the network is correct
@@ -216,44 +226,53 @@ class NetworkTestCase(test.TrialTestCase):
         There are ips reserved at the bottom and top of the range.
         services (network, gateway, CloudPipe, broadcast)
         """
-        net = model.get_project_network(self.projects[0].id, "default")
-        num_preallocated_ips = len(net.assigned)
+        network = db.project_get_network(None, self.projects[0].id)
         net_size = flags.FLAGS.network_size
-        num_available_ips = net_size - (net.num_bottom_reserved_ips +
-                                        num_preallocated_ips +
-                                        net.num_top_reserved_ips)
-        self.assertEqual(num_available_ips, len(list(net.available)))
+        total_ips = (db.network_count_available_ips(None, network['id']) +
+                     db.network_count_reserved_ips(None, network['id']) +
+                     db.network_count_allocated_ips(None, network['id']))
+        self.assertEqual(total_ips, net_size)

     def test_too_many_addresses(self):
         """Test for a NoMoreAddresses exception when all fixed ips are used.
         """
-        net = model.get_project_network(self.projects[0].id, "default")
-        hostname = "toomany-hosts"
-        macs = {}
-        addresses = {}
-        # Number of availaible ips is len of the available list
-        num_available_ips = len(list(net.available))
+        network = db.project_get_network(None, self.projects[0].id)
+        num_available_ips = db.network_count_available_ips(None,
+                                                           network['id'])
+        addresses = []
+        instance_ids = []
         for i in range(num_available_ips):
-            result = self.service.allocate_fixed_ip(self.user.id,
-                                                    self.projects[0].id)
-            macs[i] = result['mac_address']
-            addresses[i] = result['private_dns_name']
-            issue_ip(macs[i], addresses[i], hostname, net.bridge_name)
+            mac = utils.generate_mac()
+            instance_ref = db.instance_create(None,
+                                              {'mac_address': mac})
+            instance_ids.append(instance_ref['id'])
+            address = self._create_address(0, instance_ref['id'])
+            addresses.append(address)
+            lease_ip(address)

-        self.assertEqual(len(list(net.available)), 0)
-        self.assertRaises(NoMoreAddresses, self.service.allocate_fixed_ip,
-                          self.user.id, self.projects[0].id)
+        self.assertEqual(db.network_count_available_ips(None,
+                                                        network['id']), 0)
+        self.assertRaises(db.NoMoreAddresses,
+                          self.network.allocate_fixed_ip,
+                          self.context,
+                          'foo')

-        for i in range(len(addresses)):
-            self.service.deallocate_fixed_ip(addresses[i])
-            release_ip(macs[i], addresses[i], hostname, net.bridge_name)
-        self.assertEqual(len(list(net.available)), num_available_ips)
+        for i in range(num_available_ips):
+            self.network.deallocate_fixed_ip(self.context, addresses[i])
+            release_ip(addresses[i])
+            db.instance_destroy(None, instance_ids[i])
+        self.assertEqual(db.network_count_available_ips(None,
+                                                        network['id']),
+                         num_available_ips)


-def is_in_project(address, project_id):
+def is_allocated_in_project(address, project_id):
     """Returns true if address is in specified project"""
-    return address in model.get_project_network(project_id).assigned
+    project_net = db.project_get_network(None, project_id)
+    network = db.fixed_ip_get_network(None, address)
+    instance = db.fixed_ip_get_instance(None, address)
+    # instance exists until release
+    return instance is not None and network['id'] == project_net['id']


 def binpath(script):
@@ -261,22 +280,28 @@ def binpath(script):
     return os.path.abspath(os.path.join(__file__, "../../../bin", script))


-def issue_ip(mac, private_ip, hostname, interface):
+def lease_ip(private_ip):
     """Run add command on dhcpbridge"""
-    cmd = "%s add %s %s %s" % (binpath('nova-dhcpbridge'),
-                               mac, private_ip, hostname)
-    env = {'DNSMASQ_INTERFACE': interface,
+    network_ref = db.fixed_ip_get_network(None, private_ip)
+    instance_ref = db.fixed_ip_get_instance(None, private_ip)
+    cmd = "%s add %s %s fake" % (binpath('nova-dhcpbridge'),
+                                 instance_ref['mac_address'],
+                                 private_ip)
+    env = {'DNSMASQ_INTERFACE': network_ref['bridge'],
            'TESTING': '1',
            'FLAGFILE': FLAGS.dhcpbridge_flagfile}
     (out, err) = utils.execute(cmd, addl_env=env)
     logging.debug("ISSUE_IP: %s, %s ", out, err)


-def release_ip(mac, private_ip, hostname, interface):
+def release_ip(private_ip):
     """Run del command on dhcpbridge"""
-    cmd = "%s del %s %s %s" % (binpath('nova-dhcpbridge'),
-                               mac, private_ip, hostname)
-    env = {'DNSMASQ_INTERFACE': interface,
+    network_ref = db.fixed_ip_get_network(None, private_ip)
+    instance_ref = db.fixed_ip_get_instance(None, private_ip)
+    cmd = "%s del %s %s fake" % (binpath('nova-dhcpbridge'),
+                                 instance_ref['mac_address'],
+                                 private_ip)
+    env = {'DNSMASQ_INTERFACE': network_ref['bridge'],
            'TESTING': '1',
            'FLAGFILE': FLAGS.dhcpbridge_flagfile}
     (out, err) = utils.execute(cmd, addl_env=env)
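The rewritten helpers above still shell out to the same nova-dhcpbridge script, but they now look up the MAC address and bridge from the database instead of taking them as arguments, so a test only needs the address itself. A minimal sketch of the lifecycle the tests follow, assuming the helpers and manager methods shown in the hunk:

    address = self._create_address(0)                         # allocate a fixed ip
    lease_ip(address)                                         # simulate the dhcp lease callback
    self.network.deallocate_fixed_ip(self.context, address)   # mark it for release
    release_ip(address)                                       # simulate the dhcp release callback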
@@ -21,7 +21,6 @@ from nova import flags
 FLAGS = flags.FLAGS

 FLAGS.connection_type = 'libvirt'
-FLAGS.fake_storage = False
 FLAGS.fake_rabbit = False
 FLAGS.fake_network = False
 FLAGS.verbose = False
nova/tests/service_unittest.py (new file, 182 lines)
@@ -0,0 +1,182 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Unit Tests for remote procedure calls using queue
+"""
+
+import mox
+
+from nova import exception
+from nova import flags
+from nova import rpc
+from nova import test
+from nova import service
+from nova import manager
+
+FLAGS = flags.FLAGS
+flags.DEFINE_string("fake_manager", "nova.tests.service_unittest.FakeManager",
+                    "Manager for testing")
+
+
+class FakeManager(manager.Manager):
+    """Fake manager for tests"""
+    pass
+
+
+class ServiceTestCase(test.BaseTestCase):
+    """Test cases for rpc"""
+
+    def setUp(self):  # pylint: disable=C0103
+        super(ServiceTestCase, self).setUp()
+        self.mox.StubOutWithMock(service, 'db')
+
+    def test_create(self):
+        host = 'foo'
+        binary = 'nova-fake'
+        topic = 'fake'
+        self.mox.StubOutWithMock(rpc,
+                                 'AdapterConsumer',
+                                 use_mock_anything=True)
+        self.mox.StubOutWithMock(
+                service.task, 'LoopingCall', use_mock_anything=True)
+        rpc.AdapterConsumer(connection=mox.IgnoreArg(),
+                            topic=topic,
+                            proxy=mox.IsA(service.Service)).AndReturn(
+                                    rpc.AdapterConsumer)
+
+        rpc.AdapterConsumer(connection=mox.IgnoreArg(),
+                            topic='%s.%s' % (topic, host),
+                            proxy=mox.IsA(service.Service)).AndReturn(
+                                    rpc.AdapterConsumer)
+
+        # Stub out looping call a bit needlessly since we don't have an easy
+        # way to cancel it (yet) when the tests finishes
+        service.task.LoopingCall(mox.IgnoreArg()).AndReturn(
+                service.task.LoopingCall)
+        service.task.LoopingCall.start(interval=mox.IgnoreArg(),
+                                       now=mox.IgnoreArg())
+
+        rpc.AdapterConsumer.attach_to_twisted()
+        rpc.AdapterConsumer.attach_to_twisted()
+        service_create = {'host': host,
+                          'binary': binary,
+                          'topic': topic,
+                          'report_count': 0}
+        service_ref = {'host': host,
+                       'binary': binary,
+                       'report_count': 0,
+                       'id': 1}
+
+        service.db.service_get_by_args(None,
+                                       host,
+                                       binary).AndRaise(exception.NotFound())
+        service.db.service_create(None,
+                                  service_create).AndReturn(service_ref)
+        self.mox.ReplayAll()
+
+        app = service.Service.create(host=host, binary=binary)
+        self.assert_(app)
+
+    # We're testing sort of weird behavior in how report_state decides
+    # whether it is disconnected, it looks for a variable on itself called
+    # 'model_disconnected' and report_state doesn't really do much so this
+    # these are mostly just for coverage
+    def test_report_state(self):
+        host = 'foo'
+        binary = 'bar'
+        service_ref = {'host': host,
+                       'binary': binary,
+                       'report_count': 0,
+                       'id': 1}
+        service.db.__getattr__('report_state')
+        service.db.service_get_by_args(None,
+                                       host,
+                                       binary).AndReturn(service_ref)
+        service.db.service_update(None, service_ref['id'],
+                                  mox.ContainsKeyValue('report_count', 1))
+
+        self.mox.ReplayAll()
+        s = service.Service()
+        rv = yield s.report_state(host, binary)
+
+    def test_report_state_no_service(self):
+        host = 'foo'
+        binary = 'bar'
+        service_create = {'host': host,
+                          'binary': binary,
+                          'report_count': 0}
+        service_ref = {'host': host,
+                       'binary': binary,
+                       'report_count': 0,
+                       'id': 1}
+
+        service.db.__getattr__('report_state')
+        service.db.service_get_by_args(None,
+                                       host,
+                                       binary).AndRaise(exception.NotFound())
+        service.db.service_create(None,
+                                  service_create).AndReturn(service_ref)
+        service.db.service_get(None, service_ref['id']).AndReturn(service_ref)
+        service.db.service_update(None, service_ref['id'],
+                                  mox.ContainsKeyValue('report_count', 1))
+
+        self.mox.ReplayAll()
+        s = service.Service()
+        rv = yield s.report_state(host, binary)
+
+    def test_report_state_newly_disconnected(self):
+        host = 'foo'
+        binary = 'bar'
+        service_ref = {'host': host,
+                       'binary': binary,
+                       'report_count': 0,
+                       'id': 1}
+
+        service.db.__getattr__('report_state')
+        service.db.service_get_by_args(None,
+                                       host,
+                                       binary).AndRaise(Exception())
+
+        self.mox.ReplayAll()
+        s = service.Service()
+        rv = yield s.report_state(host, binary)
+
+        self.assert_(s.model_disconnected)
+
+    def test_report_state_newly_connected(self):
+        host = 'foo'
+        binary = 'bar'
+        service_ref = {'host': host,
+                       'binary': binary,
+                       'report_count': 0,
+                       'id': 1}
+
+        service.db.__getattr__('report_state')
+        service.db.service_get_by_args(None,
+                                       host,
+                                       binary).AndReturn(service_ref)
+        service.db.service_update(None, service_ref['id'],
+                                  mox.ContainsKeyValue('report_count', 1))
+
+        self.mox.ReplayAll()
+        s = service.Service()
+        s.model_disconnected = True
+        rv = yield s.report_state(host, binary)
+
+        self.assert_(not s.model_disconnected)
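The new service tests above follow the usual mox record/replay cycle: stub out a collaborator, record the expected calls, then switch to replay mode before exercising the code under test. A condensed sketch of that pattern, built only from calls that appear in the file:

    self.mox.StubOutWithMock(service, 'db')
    service.db.service_get_by_args(None, host, binary).AndReturn(service_ref)
    service.db.service_update(None, service_ref['id'],
                              mox.ContainsKeyValue('report_count', 1))
    self.mox.ReplayAll()
    s = service.Service()
    yield s.report_state(host, binary)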
@@ -1,115 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright 2010 United States Government as represented by the
-# Administrator of the National Aeronautics and Space Administration.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import logging
-
-from nova import exception
-from nova import flags
-from nova import test
-from nova.compute import node
-from nova.volume import storage
-
-
-FLAGS = flags.FLAGS
-
-
-class StorageTestCase(test.TrialTestCase):
-    def setUp(self):
-        logging.getLogger().setLevel(logging.DEBUG)
-        super(StorageTestCase, self).setUp()
-        self.mynode = node.Node()
-        self.mystorage = None
-        self.flags(connection_type='fake',
-                   fake_storage=True)
-        self.mystorage = storage.BlockStore()
-
-    def test_run_create_volume(self):
-        vol_size = '0'
-        user_id = 'fake'
-        project_id = 'fake'
-        volume_id = self.mystorage.create_volume(vol_size, user_id, project_id)
-        # TODO(termie): get_volume returns differently than create_volume
-        self.assertEqual(volume_id,
-                         storage.get_volume(volume_id)['volume_id'])
-
-        rv = self.mystorage.delete_volume(volume_id)
-        self.assertRaises(exception.Error,
-                          storage.get_volume,
-                          volume_id)
-
-    def test_too_big_volume(self):
-        vol_size = '1001'
-        user_id = 'fake'
-        project_id = 'fake'
-        self.assertRaises(TypeError,
-                          self.mystorage.create_volume,
-                          vol_size, user_id, project_id)
-
-    def test_too_many_volumes(self):
-        vol_size = '1'
-        user_id = 'fake'
-        project_id = 'fake'
-        num_shelves = FLAGS.last_shelf_id - FLAGS.first_shelf_id + 1
-        total_slots = FLAGS.slots_per_shelf * num_shelves
-        vols = []
-        for i in xrange(total_slots):
-            vid = self.mystorage.create_volume(vol_size, user_id, project_id)
-            vols.append(vid)
-        self.assertRaises(storage.NoMoreVolumes,
-                          self.mystorage.create_volume,
-                          vol_size, user_id, project_id)
-        for id in vols:
-            self.mystorage.delete_volume(id)
-
-    def test_run_attach_detach_volume(self):
-        # Create one volume and one node to test with
-        instance_id = "storage-test"
-        vol_size = "5"
-        user_id = "fake"
-        project_id = 'fake'
-        mountpoint = "/dev/sdf"
-        volume_id = self.mystorage.create_volume(vol_size, user_id, project_id)
-
-        volume_obj = storage.get_volume(volume_id)
-        volume_obj.start_attach(instance_id, mountpoint)
-        rv = yield self.mynode.attach_volume(volume_id,
-                                             instance_id,
-                                             mountpoint)
-        self.assertEqual(volume_obj['status'], "in-use")
-        self.assertEqual(volume_obj['attachStatus'], "attached")
-        self.assertEqual(volume_obj['instance_id'], instance_id)
-        self.assertEqual(volume_obj['mountpoint'], mountpoint)
-
-        self.assertRaises(exception.Error,
-                          self.mystorage.delete_volume,
-                          volume_id)
-
-        rv = yield self.mystorage.detach_volume(volume_id)
-        volume_obj = storage.get_volume(volume_id)
-        self.assertEqual(volume_obj['status'], "available")
-
-        rv = self.mystorage.delete_volume(volume_id)
-        self.assertRaises(exception.Error,
-                          storage.get_volume,
-                          volume_id)
-
-    def test_multi_node(self):
-        # TODO(termie): Figure out how to test with two nodes,
-        # each of them having a different FLAG for storage_node
-        # This will allow us to test cross-node interactions
-        pass
@@ -15,139 +15,159 @@
|
|||||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||||
# License for the specific language governing permissions and limitations
|
# License for the specific language governing permissions and limitations
|
||||||
# under the License.
|
# under the License.
|
||||||
|
"""
|
||||||
|
Tests for Volume Code
|
||||||
|
"""
|
||||||
import logging
|
import logging
|
||||||
import shutil
|
|
||||||
import tempfile
|
|
||||||
|
|
||||||
from twisted.internet import defer
|
from twisted.internet import defer
|
||||||
|
|
||||||
from nova import compute
|
|
||||||
from nova import exception
|
from nova import exception
|
||||||
|
from nova import db
|
||||||
from nova import flags
|
from nova import flags
|
||||||
from nova import test
|
from nova import test
|
||||||
from nova.volume import service as volume_service
|
from nova import utils
|
||||||
|
|
||||||
|
|
||||||
FLAGS = flags.FLAGS
|
FLAGS = flags.FLAGS
|
||||||
|
|
||||||
|
|
||||||
class VolumeTestCase(test.TrialTestCase):
|
class VolumeTestCase(test.TrialTestCase):
|
||||||
def setUp(self):
|
"""Test Case for volumes"""
|
||||||
|
def setUp(self): # pylint: disable-msg=C0103
|
||||||
logging.getLogger().setLevel(logging.DEBUG)
|
logging.getLogger().setLevel(logging.DEBUG)
|
||||||
super(VolumeTestCase, self).setUp()
|
super(VolumeTestCase, self).setUp()
|
||||||
self.compute = compute.service.ComputeService()
|
self.compute = utils.import_object(FLAGS.compute_manager)
|
||||||
self.volume = None
|
self.flags(connection_type='fake')
|
||||||
self.tempdir = tempfile.mkdtemp()
|
self.volume = utils.import_object(FLAGS.volume_manager)
|
||||||
self.flags(connection_type='fake',
|
self.context = None
|
||||||
fake_storage=True,
|
|
||||||
aoe_export_dir=self.tempdir)
|
|
||||||
self.volume = volume_service.VolumeService()
|
|
||||||
|
|
||||||
def tearDown(self):
|
@staticmethod
|
||||||
shutil.rmtree(self.tempdir)
|
def _create_volume(size='0'):
|
||||||
|
"""Create a volume object"""
|
||||||
|
vol = {}
|
||||||
|
vol['size'] = size
|
||||||
|
vol['user_id'] = 'fake'
|
||||||
|
vol['project_id'] = 'fake'
|
||||||
|
vol['availability_zone'] = FLAGS.storage_availability_zone
|
||||||
|
vol['status'] = "creating"
|
||||||
|
vol['attach_status'] = "detached"
|
||||||
|
return db.volume_create(None, vol)['id']
|
||||||
|
|
||||||
@defer.inlineCallbacks
|
@defer.inlineCallbacks
|
||||||
def test_run_create_volume(self):
|
def test_create_delete_volume(self):
|
||||||
vol_size = '0'
|
"""Test volume can be created and deleted"""
|
||||||
user_id = 'fake'
|
volume_id = self._create_volume()
|
||||||
project_id = 'fake'
|
yield self.volume.create_volume(self.context, volume_id)
|
||||||
volume_id = yield self.volume.create_volume(vol_size, user_id, project_id)
|
self.assertEqual(volume_id, db.volume_get(None, volume_id).id)
|
||||||
# TODO(termie): get_volume returns differently than create_volume
|
|
||||||
self.assertEqual(volume_id,
|
|
||||||
volume_service.get_volume(volume_id)['volume_id'])
|
|
||||||
|
|
||||||
rv = self.volume.delete_volume(volume_id)
|
yield self.volume.delete_volume(self.context, volume_id)
|
||||||
self.assertRaises(exception.Error, volume_service.get_volume, volume_id)
|
self.assertRaises(exception.NotFound,
|
||||||
|
db.volume_get,
|
||||||
|
None,
|
||||||
|
volume_id)
|
||||||
|
|
||||||
@defer.inlineCallbacks
|
@defer.inlineCallbacks
|
||||||
def test_too_big_volume(self):
|
def test_too_big_volume(self):
|
||||||
vol_size = '1001'
|
"""Ensure failure if a too large of a volume is requested"""
|
||||||
user_id = 'fake'
|
# FIXME(vish): validation needs to move into the data layer in
|
||||||
project_id = 'fake'
|
# volume_create
|
||||||
|
defer.returnValue(True)
|
||||||
try:
|
try:
|
||||||
yield self.volume.create_volume(vol_size, user_id, project_id)
|
volume_id = self._create_volume('1001')
|
||||||
|
yield self.volume.create_volume(self.context, volume_id)
|
||||||
self.fail("Should have thrown TypeError")
|
self.fail("Should have thrown TypeError")
|
||||||
except TypeError:
|
except TypeError:
|
||||||
pass
|
pass
|
||||||
|
|
||||||
@defer.inlineCallbacks
|
@defer.inlineCallbacks
|
||||||
def test_too_many_volumes(self):
|
def test_too_many_volumes(self):
|
||||||
vol_size = '1'
|
"""Ensure that NoMoreBlades is raised when we run out of volumes"""
|
||||||
user_id = 'fake'
|
|
||||||
project_id = 'fake'
|
|
||||||
num_shelves = FLAGS.last_shelf_id - FLAGS.first_shelf_id + 1
|
|
||||||
total_slots = FLAGS.blades_per_shelf * num_shelves
|
|
||||||
vols = []
|
vols = []
|
||||||
from nova import datastore
|
total_slots = FLAGS.num_shelves * FLAGS.blades_per_shelf
|
||||||
redis = datastore.Redis.instance()
|
for _index in xrange(total_slots):
|
||||||
for i in xrange(total_slots):
|
volume_id = self._create_volume()
|
||||||
vid = yield self.volume.create_volume(vol_size, user_id, project_id)
|
yield self.volume.create_volume(self.context, volume_id)
|
||||||
vols.append(vid)
|
vols.append(volume_id)
|
||||||
self.assertFailure(self.volume.create_volume(vol_size,
|
volume_id = self._create_volume()
|
||||||
user_id,
|
self.assertFailure(self.volume.create_volume(self.context,
|
||||||
project_id),
|
volume_id),
|
||||||
volume_service.NoMoreBlades)
|
db.NoMoreBlades)
|
||||||
for id in vols:
|
db.volume_destroy(None, volume_id)
|
||||||
yield self.volume.delete_volume(id)
|
for volume_id in vols:
|
||||||
|
yield self.volume.delete_volume(self.context, volume_id)
|
||||||
|
|
||||||
@defer.inlineCallbacks
|
@defer.inlineCallbacks
|
||||||
def test_run_attach_detach_volume(self):
|
def test_run_attach_detach_volume(self):
|
||||||
-        # Create one volume and one compute to test with
-        instance_id = "storage-test"
-        vol_size = "5"
-        user_id = "fake"
-        project_id = 'fake'
+        """Make sure volume can be attached and detached from instance"""
+        inst = {}
+        inst['image_id'] = 'ami-test'
+        inst['reservation_id'] = 'r-fakeres'
+        inst['launch_time'] = '10'
+        inst['user_id'] = 'fake'
+        inst['project_id'] = 'fake'
+        inst['instance_type'] = 'm1.tiny'
+        inst['mac_address'] = utils.generate_mac()
+        inst['ami_launch_index'] = 0
+        instance_id = db.instance_create(self.context, inst)['id']
         mountpoint = "/dev/sdf"
-        volume_id = yield self.volume.create_volume(vol_size, user_id, project_id)
-        volume_obj = volume_service.get_volume(volume_id)
-        volume_obj.start_attach(instance_id, mountpoint)
+        volume_id = self._create_volume()
+        yield self.volume.create_volume(self.context, volume_id)
         if FLAGS.fake_tests:
-            volume_obj.finish_attach()
+            db.volume_attached(None, volume_id, instance_id, mountpoint)
         else:
-            rv = yield self.compute.attach_volume(instance_id,
-                                                  volume_id,
-                                                  mountpoint)
-        self.assertEqual(volume_obj['status'], "in-use")
-        self.assertEqual(volume_obj['attach_status'], "attached")
-        self.assertEqual(volume_obj['instance_id'], instance_id)
-        self.assertEqual(volume_obj['mountpoint'], mountpoint)
+            yield self.compute.attach_volume(instance_id,
+                                             volume_id,
+                                             mountpoint)
+        vol = db.volume_get(None, volume_id)
+        self.assertEqual(vol['status'], "in-use")
+        self.assertEqual(vol['attach_status'], "attached")
+        self.assertEqual(vol['mountpoint'], mountpoint)
+        instance_ref = db.volume_get_instance(self.context, volume_id)
+        self.assertEqual(instance_ref['id'], instance_id)
 
-        self.assertFailure(self.volume.delete_volume(volume_id), exception.Error)
-        volume_obj.start_detach()
+        self.assertFailure(self.volume.delete_volume(self.context, volume_id),
+                           exception.Error)
         if FLAGS.fake_tests:
-            volume_obj.finish_detach()
+            db.volume_detached(None, volume_id)
         else:
-            rv = yield self.volume.detach_volume(instance_id,
-                                                 volume_id)
-        volume_obj = volume_service.get_volume(volume_id)
-        self.assertEqual(volume_obj['status'], "available")
+            yield self.compute.detach_volume(instance_id,
+                                             volume_id)
+        vol = db.volume_get(None, volume_id)
+        self.assertEqual(vol['status'], "available")
 
-        rv = self.volume.delete_volume(volume_id)
+        yield self.volume.delete_volume(self.context, volume_id)
         self.assertRaises(exception.Error,
-                          volume_service.get_volume,
+                          db.volume_get,
+                          None,
                           volume_id)
+        db.instance_destroy(self.context, instance_id)
 
     @defer.inlineCallbacks
-    def test_multiple_volume_race_condition(self):
-        vol_size = "5"
-        user_id = "fake"
-        project_id = 'fake'
+    def test_concurrent_volumes_get_different_blades(self):
+        """Ensure multiple concurrent volumes get different blades"""
+        volume_ids = []
         shelf_blades = []
 
         def _check(volume_id):
-            vol = volume_service.get_volume(volume_id)
-            shelf_blade = '%s.%s' % (vol['shelf_id'], vol['blade_id'])
+            """Make sure blades aren't duplicated"""
+            volume_ids.append(volume_id)
+            (shelf_id, blade_id) = db.volume_get_shelf_and_blade(None,
+                                                                 volume_id)
+            shelf_blade = '%s.%s' % (shelf_id, blade_id)
             self.assert_(shelf_blade not in shelf_blades)
             shelf_blades.append(shelf_blade)
-            logging.debug("got %s" % shelf_blade)
-            vol.destroy()
+            logging.debug("Blade %s allocated", shelf_blade)
 
         deferreds = []
-        for i in range(5):
-            d = self.volume.create_volume(vol_size, user_id, project_id)
+        total_slots = FLAGS.num_shelves * FLAGS.blades_per_shelf
+        for _index in xrange(total_slots):
+            volume_id = self._create_volume()
+            d = self.volume.create_volume(self.context, volume_id)
             d.addCallback(_check)
             d.addErrback(self.fail)
             deferreds.append(d)
         yield defer.DeferredList(deferreds)
+        for volume_id in volume_ids:
+            self.volume.delete_volume(self.context, volume_id)
 
     def test_multi_node(self):
         # TODO(termie): Figure out how to test with two nodes,
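The reworked attach/detach test drives everything through db-backed records and a _create_volume() helper that is not part of this hunk. A minimal sketch of what such a helper could look like, assuming it only seeds a volume row and returns its id; the field names here are guesses mirroring the inst dict above, not the committed code:

    # Hypothetical helper assumed by the tests above; column names are guesses.
    def _create_volume(self, size='0'):
        """Seed a bare volume record in the db and return its id."""
        vol = {}
        vol['size'] = size
        vol['user_id'] = 'fake'
        vol['project_id'] = 'fake'
        vol['availability_zone'] = FLAGS.storage_availability_zone
        vol['status'] = "creating"
        vol['attach_status'] = "detached"
        return db.volume_create(self.context, vol)['id']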
@@ -21,6 +21,7 @@ Twisted daemon helpers, specifically to parse out gFlags from twisted flags,
 manage pid files and support syslogging.
 """
 
+import gflags
 import logging
 import os
 import signal
@@ -49,6 +50,14 @@ class TwistdServerOptions(ServerOptions):
         return
 
 
+class FlagParser(object):
+    def __init__(self, parser):
+        self.parser = parser
+
+    def Parse(self, s):
+        return self.parser(s)
+
+
 def WrapTwistedOptions(wrapped):
     class TwistedOptionsToFlags(wrapped):
         subCommands = None
@@ -79,7 +88,12 @@ def WrapTwistedOptions(wrapped):
            reflect.accumulateClassList(self.__class__, 'optParameters', twistd_params)
            for param in twistd_params:
                key = param[0].replace('-', '_')
-                flags.DEFINE_string(key, param[2], str(param[-1]))
+                if len(param) > 4:
+                    flags.DEFINE(FlagParser(param[4]),
+                                 key, param[2], str(param[3]),
+                                 serializer=gflags.ArgumentSerializer())
+                else:
+                    flags.DEFINE_string(key, param[2], str(param[3]))
 
        def _absorbHandlers(self):
            twistd_handlers = {}
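The new FlagParser shim works because gflags needs little more than an object with a Parse() method, which lets the optional fifth element of a Twisted optParameters entry (a coercion callable) act as the flag's parser. A standalone sketch of that duck-typing, using a hypothetical listen_port flag rather than anything defined in this commit:

    import sys

    import gflags


    class FlagParser(object):
        """Adapt any callable into the parser interface gflags expects."""
        def __init__(self, parser):
            self.parser = parser

        def Parse(self, s):
            return self.parser(s)


    # Register a flag whose string value is coerced through int(), the same
    # way WrapTwistedOptions now treats a five-element optParameters entry.
    gflags.DEFINE(FlagParser(int), 'listen_port', '11211', 'port to listen on',
                  serializer=gflags.ArgumentSerializer())

    if __name__ == '__main__':
        gflags.FLAGS(sys.argv + ['--listen_port=1234'])
        assert gflags.FLAGS.listen_port == 1234  # parsed as an int, not a str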
@@ -1,96 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright 2010 United States Government as represented by the
-# Administrator of the National Aeronautics and Space Administration.
-# Copyright 2010 OpenStack LLC.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-"""
-Test WSGI basics and provide some helper functions for other WSGI tests.
-"""
-
-import unittest
-
-import routes
-import webob
-
-from nova import wsgi
-
-
-class Test(unittest.TestCase):
-
-    def test_debug(self):
-
-        class Application(wsgi.Application):
-            """Dummy application to test debug."""
-
-            def __call__(self, environ, start_response):
-                start_response("200", [("X-Test", "checking")])
-                return ['Test result']
-
-        application = wsgi.Debug(Application())
-        result = webob.Request.blank('/').get_response(application)
-        self.assertEqual(result.body, "Test result")
-
-    def test_router(self):
-
-        class Application(wsgi.Application):
-            """Test application to call from router."""
-
-            def __call__(self, environ, start_response):
-                start_response("200", [])
-                return ['Router result']
-
-        class Router(wsgi.Router):
-            """Test router."""
-
-            def __init__(self):
-                mapper = routes.Mapper()
-                mapper.connect("/test", controller=Application())
-                super(Router, self).__init__(mapper)
-
-        result = webob.Request.blank('/test').get_response(Router())
-        self.assertEqual(result.body, "Router result")
-        result = webob.Request.blank('/bad').get_response(Router())
-        self.assertNotEqual(result.body, "Router result")
-
-    def test_controller(self):
-
-        class Controller(wsgi.Controller):
-            """Test controller to call from router."""
-            test = self
-
-            def show(self, req, id):  # pylint: disable-msg=W0622,C0103
-                """Default action called for requests with an ID."""
-                self.test.assertEqual(req.path_info, '/tests/123')
-                self.test.assertEqual(id, '123')
-                return id
-
-        class Router(wsgi.Router):
-            """Test router."""
-
-            def __init__(self):
-                mapper = routes.Mapper()
-                mapper.resource("test", "tests", controller=Controller())
-                super(Router, self).__init__(mapper)
-
-        result = webob.Request.blank('/tests/123').get_response(Router())
-        self.assertEqual(result.body, "123")
-        result = webob.Request.blank('/test/123').get_response(Router())
-        self.assertNotEqual(result.body, "123")
-
-    def test_serializer(self):
-        # TODO(eday): Placeholder for serializer testing.
-        pass
@@ -55,11 +55,11 @@ from nova.tests.api_unittest import *
 from nova.tests.cloud_unittest import *
 from nova.tests.compute_unittest import *
 from nova.tests.flags_unittest import *
-from nova.tests.model_unittest import *
 from nova.tests.network_unittest import *
 from nova.tests.objectstore_unittest import *
 from nova.tests.process_unittest import *
 from nova.tests.rpc_unittest import *
+from nova.tests.service_unittest import *
 from nova.tests.validator_unittest import *
 from nova.tests.volume_unittest import *
 