Resolving conflict and finish test_images
Authors (+5 lines)
@@ -1,4 +1,5 @@
 Alex Meade <alex.meade@rackspace.com>
+Andrey Brindeyev <abrindeyev@griddynamics.com>
 Andy Smith <code@term.ie>
 Andy Southgate <andy.southgate@citrix.com>
 Anne Gentle <anne@openstack.org>
@@ -16,6 +17,7 @@ Christian Berendt <berendt@b1-systems.de>
 Chuck Short <zulcss@ubuntu.com>
 Cory Wright <corywright@gmail.com>
 Dan Prince <dan.prince@rackspace.com>
+Dave Walker <DaveWalker@ubuntu.com>
 David Pravec <David.Pravec@danix.org>
 Dean Troyer <dtroyer@gmail.com>
 Devin Carlen <devin.carlen@gmail.com>
@@ -28,6 +30,7 @@ Gabe Westmaas <gabe.westmaas@rackspace.com>
 Hisaharu Ishii <ishii.hisaharu@lab.ntt.co.jp>
 Hisaki Ohara <hisaki.ohara@intel.com>
 Ilya Alekseyev <ialekseev@griddynamics.com>
+Isaku Yamahata <yamahata@valinux.co.jp>
 Jason Koelker <jason@koelker.net>
 Jay Pipes <jaypipes@gmail.com>
 Jesse Andrews <anotherjesse@gmail.com>
@@ -64,6 +67,7 @@ Nachi Ueno <ueno.nachi@lab.ntt.co.jp>
 Naveed Massjouni <naveedm9@gmail.com>
 Nirmal Ranganathan <nirmal.ranganathan@rackspace.com>
 Paul Voccio <paul@openstack.org>
+Renuka Apte <renuka.apte@citrix.com>
 Ricardo Carrillo Cruz <emaildericky@gmail.com>
 Rick Clark <rick@openstack.org>
 Rick Harris <rconradharris@gmail.com>
@@ -80,6 +84,7 @@ Trey Morris <trey.morris@rackspace.com>
 Tushar Patil <tushar.vitthal.patil@gmail.com>
 Vasiliy Shlykov <vash@vasiliyshlykov.org>
 Vishvananda Ishaya <vishvananda@gmail.com>
+Vivek Y S <vivek.ys@gmail.com>
 William Wolf <throughnothing@gmail.com>
 Yoshiaki Tamura <yoshi@midokura.jp>
 Youcef Laribi <Youcef.Laribi@eu.citrix.com>
@@ -35,6 +35,7 @@ include nova/tests/bundle/1mb.manifest.xml
 include nova/tests/bundle/1mb.no_kernel_or_ramdisk.manifest.xml
 include nova/tests/bundle/1mb.part.0
 include nova/tests/bundle/1mb.part.1
+include nova/tests/public_key/*
 include nova/tests/db/nova.austin.sqlite
 include plugins/xenapi/README
 include plugins/xenapi/etc/xapi.d/plugins/objectstore
@@ -108,6 +108,13 @@ def main():
     interface = os.environ.get('DNSMASQ_INTERFACE', FLAGS.dnsmasq_interface)
     if int(os.environ.get('TESTING', '0')):
         from nova.tests import fake_flags
+
+        #if FLAGS.fake_rabbit:
+        #    LOG.debug(_("leasing ip"))
+        #    network_manager = utils.import_object(FLAGS.network_manager)
+        ## reload(fake_flags)
+        # from nova.tests import fake_flags
+
     action = argv[1]
     if action in ['add', 'del', 'old']:
         mac = argv[2]
@@ -97,7 +97,7 @@ flags.DECLARE('vlan_start', 'nova.network.manager')
 flags.DECLARE('vpn_start', 'nova.network.manager')
 flags.DECLARE('fixed_range_v6', 'nova.network.manager')
 flags.DECLARE('images_path', 'nova.image.local')
-flags.DECLARE('libvirt_type', 'nova.virt.libvirt_conn')
+flags.DECLARE('libvirt_type', 'nova.virt.libvirt.connection')
 flags.DEFINE_flag(flags.HelpFlag())
 flags.DEFINE_flag(flags.HelpshortFlag())
 flags.DEFINE_flag(flags.HelpXMLFlag())
@@ -362,27 +362,47 @@ class ProjectCommands(object):
     def add(self, project_id, user_id):
         """Adds user to project
         arguments: project_id user_id"""
-        self.manager.add_to_project(user_id, project_id)
+        try:
+            self.manager.add_to_project(user_id, project_id)
+        except exception.UserNotFound as ex:
+            print ex
+            raise

     def create(self, name, project_manager, description=None):
         """Creates a new project
         arguments: name project_manager [description]"""
-        self.manager.create_project(name, project_manager, description)
+        try:
+            self.manager.create_project(name, project_manager, description)
+        except exception.UserNotFound as ex:
+            print ex
+            raise

     def modify(self, name, project_manager, description=None):
         """Modifies a project
         arguments: name project_manager [description]"""
-        self.manager.modify_project(name, project_manager, description)
+        try:
+            self.manager.modify_project(name, project_manager, description)
+        except exception.UserNotFound as ex:
+            print ex
+            raise

     def delete(self, name):
         """Deletes an existing project
         arguments: name"""
-        self.manager.delete_project(name)
+        try:
+            self.manager.delete_project(name)
+        except exception.ProjectNotFound as ex:
+            print ex
+            raise

     def environment(self, project_id, user_id, filename='novarc'):
         """Exports environment variables to an sourcable file
         arguments: project_id user_id [filename='novarc]"""
-        rc = self.manager.get_environment_rc(user_id, project_id)
+        try:
+            rc = self.manager.get_environment_rc(user_id, project_id)
+        except (exception.UserNotFound, exception.ProjectNotFound) as ex:
+            print ex
+            raise
         with open(filename, 'w') as f:
             f.write(rc)
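The pattern above — catch the expected manager exception, print it for the admin, then re-raise so the command still exits with a traceback and a non-zero status — can be seen in isolation in the following standalone sketch; the names (report_and_reraise, add_to_project) are illustrative only and not part of the commit.

    class UserNotFound(Exception):
        pass

    def report_and_reraise(fn, *args):
        """Run fn, print any expected error for the operator, then re-raise."""
        try:
            return fn(*args)
        except UserNotFound as ex:
            print(ex)
            raise

    def add_to_project(user_id, project_id):
        raise UserNotFound('user %s not found' % user_id)

    if __name__ == '__main__':
        try:
            report_and_reraise(add_to_project, 'alice', 'proj1')
        except UserNotFound:
            pass  # nova-manage would exit with a traceback here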
@@ -397,18 +417,26 @@ class ProjectCommands(object):
         arguments: project_id [key] [value]"""
         ctxt = context.get_admin_context()
         if key:
+            if value.lower() == 'unlimited':
+                value = None
             try:
                 db.quota_update(ctxt, project_id, key, value)
-            except exception.NotFound:
+            except exception.ProjectQuotaNotFound:
                 db.quota_create(ctxt, project_id, key, value)
-        project_quota = quota.get_quota(ctxt, project_id)
+        project_quota = quota.get_project_quotas(ctxt, project_id)
         for key, value in project_quota.iteritems():
+            if value is None:
+                value = 'unlimited'
             print '%s: %s' % (key, value)

     def remove(self, project_id, user_id):
         """Removes user from project
         arguments: project_id user_id"""
-        self.manager.remove_from_project(user_id, project_id)
+        try:
+            self.manager.remove_from_project(user_id, project_id)
+        except (exception.UserNotFound, exception.ProjectNotFound) as ex:
+            print ex
+            raise

     def scrub(self, project_id):
         """Deletes data associated with project
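The quota change gives 'unlimited' a round-trip meaning: the CLI string maps to a NULL value in the database, and NULL maps back to 'unlimited' on display. A minimal standalone sketch of that mapping (illustration only, not the nova-manage code):

    def parse_quota_value(value):
        """Map the CLI string to the stored value; 'unlimited' becomes None."""
        if value.lower() == 'unlimited':
            return None
        return int(value)

    def format_quota_value(value):
        """Map the stored value back to what the admin sees."""
        return 'unlimited' if value is None else value

    assert parse_quota_value('unlimited') is None
    assert format_quota_value(None) == 'unlimited'
    assert format_quota_value(10) == 10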
@@ -427,6 +455,9 @@ class ProjectCommands(object):
             zip_file = self.manager.get_credentials(user_id, project_id)
             with open(filename, 'w') as f:
                 f.write(zip_file)
+        except (exception.UserNotFound, exception.ProjectNotFound) as ex:
+            print ex
+            raise
         except db.api.NoMoreNetworks:
             print _('No more networks available. If this is a new '
                     'installation, you need\nto call something like this:\n\n'
@@ -522,8 +553,10 @@ class NetworkCommands(object):
                   [network_size=FLAG], [vlan_start=FLAG],
                   [vpn_start=FLAG], [fixed_range_v6=FLAG]"""
         if not fixed_range:
-            raise TypeError(_('Fixed range in the form of 10.0.0.0/8 is '
-                              'required to create networks.'))
+            msg = _('Fixed range in the form of 10.0.0.0/8 is '
+                    'required to create networks.')
+            print msg
+            raise TypeError(msg)
         if not num_networks:
             num_networks = FLAGS.num_networks
         if not network_size:
@@ -535,14 +568,18 @@ class NetworkCommands(object):
         if not fixed_range_v6:
             fixed_range_v6 = FLAGS.fixed_range_v6
         net_manager = utils.import_object(FLAGS.network_manager)
-        net_manager.create_networks(context.get_admin_context(),
-                                    cidr=fixed_range,
-                                    num_networks=int(num_networks),
-                                    network_size=int(network_size),
-                                    vlan_start=int(vlan_start),
-                                    vpn_start=int(vpn_start),
-                                    cidr_v6=fixed_range_v6,
-                                    label=label)
+        try:
+            net_manager.create_networks(context.get_admin_context(),
+                                        cidr=fixed_range,
+                                        num_networks=int(num_networks),
+                                        network_size=int(network_size),
+                                        vlan_start=int(vlan_start),
+                                        vpn_start=int(vpn_start),
+                                        cidr_v6=fixed_range_v6,
+                                        label=label)
+        except ValueError, e:
+            print e
+            raise e

     def list(self):
         """List all created networks"""
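The ValueError guard matters because every numeric argument is pushed through int() before the manager call; bad CLI input now produces a printed message rather than a bare traceback. A standalone sketch of that failure mode (illustrative names, not the commit's code):

    def create_network(num_networks='1000', vlan_start='abc'):
        """int() raises ValueError for non-numeric CLI input."""
        try:
            return int(num_networks), int(vlan_start)
        except ValueError as e:
            print(e)
            raise

    try:
        create_network()
    except ValueError:
        pass  # nova-manage prints the message before re-raising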
@@ -65,7 +65,7 @@ def format_help(d):
     indent = MAX_INDENT - 6

     out = []
-    for k, v in d.iteritems():
+    for k, v in sorted(d.iteritems()):
         if (len(k) + 6) > MAX_INDENT:
             out.extend(['    %s' % k])
             initial_indent = ' ' * (indent + 6)
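Wrapping d.iteritems() in sorted() makes the help listing deterministic: the (key, value) pairs come out in key order instead of whatever order the dict happens to use. A tiny sketch of the difference, using a plain dict of made-up command names:

    commands = {'vm': 'manage VMs', 'db': 'manage the database',
                'network': 'manage networks'}

    # What format_help now does: iterate the pairs in key order.
    sorted_pairs = sorted(commands.items())
    assert [k for k, _ in sorted_pairs] == ['db', 'network', 'vm']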
@@ -1,4 +1,4 @@
-NOVA_KEY_DIR=$(pushd $(dirname $BASH_SOURCE)>/dev/null; pwd; popd>/dev/null)
+NOVA_KEY_DIR=$(dirname $(readlink -f ${BASH_SOURCE}))
 export EC2_ACCESS_KEY="%(access)s:%(project)s"
 export EC2_SECRET_KEY="%(secret)s"
 export EC2_URL="%(ec2)s"
@@ -31,6 +31,7 @@ LOG = logging.getLogger("nova.fakerabbit")
 EXCHANGES = {}
 QUEUES = {}
+CONSUMERS = {}


 class Message(base.BaseMessage):
@@ -96,17 +97,29 @@ class Backend(base.BaseBackend):
                   ' key %(routing_key)s') % locals())
         EXCHANGES[exchange].bind(QUEUES[queue].push, routing_key)

-    def declare_consumer(self, queue, callback, *args, **kwargs):
-        self.current_queue = queue
-        self.current_callback = callback
+    def declare_consumer(self, queue, callback, consumer_tag, *args, **kwargs):
+        global CONSUMERS
+        LOG.debug("Adding consumer %s", consumer_tag)
+        CONSUMERS[consumer_tag] = (queue, callback)
+
+    def cancel(self, consumer_tag):
+        global CONSUMERS
+        LOG.debug("Removing consumer %s", consumer_tag)
+        del CONSUMERS[consumer_tag]

     def consume(self, limit=None):
+        global CONSUMERS
+        num = 0
         while True:
-            item = self.get(self.current_queue)
-            if item:
-                self.current_callback(item)
-                raise StopIteration()
-            greenthread.sleep(0)
+            for (queue, callback) in CONSUMERS.itervalues():
+                item = self.get(queue)
+                if item:
+                    callback(item)
+                    num += 1
+                    yield
+                    if limit and num == limit:
+                        raise StopIteration()
+            greenthread.sleep(0.1)

     def get(self, queue, no_ack=False):
         global QUEUES
@@ -134,5 +147,7 @@ class Backend(base.BaseBackend):
 def reset_all():
     global EXCHANGES
     global QUEUES
+    global CONSUMERS
     EXCHANGES = {}
     QUEUES = {}
+    CONSUMERS = {}
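The fake backend now keys consumers by consumer_tag, so several consumers can share one connection and be registered or cancelled independently, mirroring what the real carrot ConsumerSet does. A standalone sketch of that registry pattern (not the nova code; names are illustrative):

    CONSUMERS = {}
    QUEUES = {'volume': ['msg-1'], 'compute': []}

    def declare_consumer(queue, callback, consumer_tag):
        CONSUMERS[consumer_tag] = (queue, callback)

    def cancel(consumer_tag):
        del CONSUMERS[consumer_tag]

    def consume_once():
        """One pass over every registered consumer, like the new consume() loop."""
        for queue, callback in CONSUMERS.values():
            if QUEUES[queue]:
                callback(QUEUES[queue].pop(0))

    def on_message(item):
        print('got %s' % item)

    declare_consumer('volume', on_message, 'tag-1')
    consume_once()   # prints: got msg-1
    cancel('tag-1')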
@@ -110,7 +110,7 @@ class FlagValues(gflags.FlagValues):
         return name in self.__dict__['__dirty']

     def ClearDirty(self):
-        self.__dict__['__is_dirty'] = []
+        self.__dict__['__dirty'] = []

     def WasAlreadyParsed(self):
         return self.__dict__['__was_already_parsed']
@@ -119,11 +119,12 @@ class FlagValues(gflags.FlagValues):
         if '__stored_argv' not in self.__dict__:
             return
         new_flags = FlagValues(self)
-        for k in self.__dict__['__dirty']:
+        for k in self.FlagDict().iterkeys():
             new_flags[k] = gflags.FlagValues.__getitem__(self, k)

+        new_flags.Reset()
         new_flags(self.__dict__['__stored_argv'])
-        for k in self.__dict__['__dirty']:
+        for k in new_flags.FlagDict().iterkeys():
             setattr(self, k, getattr(new_flags, k))
         self.ClearDirty()

@@ -369,6 +370,9 @@ DEFINE_string('host', socket.gethostname(),
 DEFINE_string('node_availability_zone', 'nova',
               'availability zone of this node')

+DEFINE_string('notification_driver',
+              'nova.notifier.no_op_notifier',
+              'Default driver for sending notifications')
 DEFINE_list('memcached_servers', None,
             'Memcached servers or None for in process cache.')
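The ClearDirty fix matters because IsDirty() reads self.__dict__['__dirty']; the old code reset a differently spelled key ('__is_dirty'), so flags stayed marked dirty forever. The shape of the bug, in a small standalone sketch (illustrative class, not the gflags subclass itself):

    class DirtyTracker(object):
        def __init__(self):
            self.__dict__['__dirty'] = []

        def mark(self, name):
            self.__dict__['__dirty'].append(name)

        def is_dirty(self, name):
            return name in self.__dict__['__dirty']

        def clear_dirty(self):
            # The buggy version reset '__is_dirty' here, so is_dirty() kept
            # returning True; the fix resets the key that is actually read.
            self.__dict__['__dirty'] = []

    t = DirtyTracker()
    t.mark('verbose')
    t.clear_dirty()
    assert not t.is_dirty('verbose')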
nova/notifier/__init__.py (new file, 14 lines)
@@ -0,0 +1,14 @@
+[Apache 2.0 license header: Copyright 2011 OpenStack LLC. All Rights Reserved.]
nova/notifier/api.py (new file, 83 lines)
@@ -0,0 +1,83 @@
+[Apache 2.0 license header: Copyright 2011 OpenStack LLC.]
+
+import datetime
+import uuid
+
+from nova import flags
+from nova import utils
+
+
+FLAGS = flags.FLAGS
+
+flags.DEFINE_string('default_notification_level', 'INFO',
+                    'Default notification level for outgoing notifications')
+
+WARN = 'WARN'
+INFO = 'INFO'
+ERROR = 'ERROR'
+CRITICAL = 'CRITICAL'
+DEBUG = 'DEBUG'
+
+log_levels = (DEBUG, WARN, INFO, ERROR, CRITICAL)
+
+
+class BadPriorityException(Exception):
+    pass
+
+
+def notify(publisher_id, event_type, priority, payload):
+    """
+    Sends a notification using the specified driver
+
+    Notify parameters:
+
+    publisher_id - the source worker_type.host of the message
+    event_type - the literal type of event (ex. Instance Creation)
+    priority - patterned after the enumeration of Python logging levels in
+               the set (DEBUG, WARN, INFO, ERROR, CRITICAL)
+    payload - A python dictionary of attributes
+
+    Outgoing message format includes the above parameters, and appends the
+    following:
+
+    message_id - a UUID representing the id for this notification
+    timestamp - the GMT timestamp the notification was sent at
+
+    The composite message will be constructed as a dictionary of the above
+    attributes, which will then be sent via the transport mechanism defined
+    by the driver.
+
+    Message example:
+
+    {'message_id': str(uuid.uuid4()),
+     'publisher_id': 'compute.host1',
+     'timestamp': datetime.datetime.utcnow(),
+     'priority': 'WARN',
+     'event_type': 'compute.create_instance',
+     'payload': {'instance_id': 12, ... }}
+
+    """
+    if priority not in log_levels:
+        raise BadPriorityException(
+                 _('%s not in valid priorities' % priority))
+    driver = utils.import_object(FLAGS.notification_driver)
+    msg = dict(message_id=str(uuid.uuid4()),
+               publisher_id=publisher_id,
+               event_type=event_type,
+               priority=priority,
+               payload=payload,
+               timestamp=str(datetime.datetime.utcnow()))
+    driver.notify(msg)
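Usage is a single call: the module wraps the payload in an envelope (message_id, timestamp, priority) and hands it to whichever driver --notification_driver names. A hedged sketch of a caller, assuming a Nova tree of this era is on the import path; the event name and payload below are made up for illustration:

    from nova.notifier import api as notifier_api

    # priority must be one of DEBUG/WARN/INFO/ERROR/CRITICAL, otherwise
    # BadPriorityException is raised before any driver is loaded.
    notifier_api.notify(publisher_id='compute.host1',
                        event_type='compute.create_instance',
                        priority=notifier_api.INFO,
                        payload={'instance_id': 12})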
nova/notifier/log_notifier.py (new file, 34 lines)
@@ -0,0 +1,34 @@
+[Apache 2.0 license header: Copyright 2011 OpenStack LLC.]
+
+import json
+
+from nova import flags
+from nova import log as logging
+
+
+FLAGS = flags.FLAGS
+
+
+def notify(message):
+    """Notifies the recipient of the desired event given the model.
+    Log notifications using nova's default logging system"""
+
+    priority = message.get('priority',
+                           FLAGS.default_notification_level)
+    priority = priority.lower()
+    logger = logging.getLogger(
+            'nova.notification.%s' % message['event_type'])
+    getattr(logger, priority)(json.dumps(message))
@@ -1,7 +1,4 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright 2010 United States Government as represented by the
-# Administrator of the National Aeronautics and Space Administration.
+# Copyright 2011 OpenStack LLC.
 # All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License"); you may
@@ -16,11 +13,7 @@
 # License for the specific language governing permissions and limitations
 # under the License.

-from nova import flags
-
-FLAGS = flags.FLAGS
-FLAGS.connection_type = 'libvirt'
-FLAGS.fake_rabbit = False
-FLAGS.fake_network = False
-FLAGS.verbose = False
+def notify(message):
+    """Notifies the recipient of the desired event given the model"""
+    pass
nova/notifier/rabbit_notifier.py (new file, 36 lines)
@@ -0,0 +1,36 @@
+[Apache 2.0 license header: Copyright 2011 OpenStack LLC.]
+
+
+import nova.context
+
+from nova import flags
+from nova import rpc
+
+
+FLAGS = flags.FLAGS
+
+flags.DEFINE_string('notification_topic', 'notifications',
+                    'RabbitMQ topic used for Nova notifications')
+
+
+def notify(message):
+    """Sends a notification to the RabbitMQ"""
+    context = nova.context.get_admin_context()
+    priority = message.get('priority',
+                           FLAGS.default_notification_level)
+    priority = priority.lower()
+    topic = '%s.%s' % (FLAGS.notification_topic, priority)
+    rpc.cast(context, topic, message)
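Because the topic is built as '<notification_topic>.<priority.lower()>', an INFO message lands on 'notifications.info' by default and an ERROR message on 'notifications.error'. A tiny standalone sketch mirroring that routing rule:

    def notification_topic(priority, base='notifications'):
        """Mirror the topic construction used by the rabbit notifier."""
        return '%s.%s' % (base, priority.lower())

    assert notification_topic('INFO') == 'notifications.info'
    assert notification_topic('ERROR') == 'notifications.error'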
nova/rpc.py
@@ -28,12 +28,15 @@ import json
 import sys
 import time
 import traceback
+import types
 import uuid

 from carrot import connection as carrot_connection
 from carrot import messaging
 from eventlet import greenpool
-from eventlet import greenthread
+from eventlet import pools
+from eventlet import queue
+import greenlet

 from nova import context
 from nova import exception
@@ -47,7 +50,10 @@ LOG = logging.getLogger('nova.rpc')
 FLAGS = flags.FLAGS
-flags.DEFINE_integer('rpc_thread_pool_size', 1024, 'Size of RPC thread pool')
+flags.DEFINE_integer('rpc_thread_pool_size', 1024,
+                     'Size of RPC thread pool')
+flags.DEFINE_integer('rpc_conn_pool_size', 30,
+                     'Size of RPC connection pool')

 class Connection(carrot_connection.BrokerConnection):
@@ -90,6 +96,22 @@ class Connection(carrot_connection.BrokerConnection):
         return cls.instance()

+class Pool(pools.Pool):
+    """Class that implements a Pool of Connections."""
+
+    # TODO(comstud): Timeout connections not used in a while
+    def create(self):
+        LOG.debug('Creating new connection')
+        return Connection.instance(new=True)
+
+# Create a ConnectionPool to use for RPC calls.  We'll order the
+# pool as a stack (LIFO), so that we can potentially loop through and
+# timeout old unused connections at some point
+ConnectionPool = Pool(
+        max_size=FLAGS.rpc_conn_pool_size,
+        order_as_stack=True)
+
 class Consumer(messaging.Consumer):
     """Consumer base class.
@@ -131,7 +153,9 @@ class Consumer(messaging.Consumer):
                 self.declare()
-            super(Consumer, self).fetch(no_ack, auto_ack, enable_callbacks)
+            return super(Consumer, self).fetch(no_ack,
+                                               auto_ack,
+                                               enable_callbacks)
             if self.failed_connection:
                 LOG.error(_('Reconnected to queue'))
                 self.failed_connection = False
@@ -159,13 +183,13 @@ class AdapterConsumer(Consumer):
         self.pool = greenpool.GreenPool(FLAGS.rpc_thread_pool_size)
         super(AdapterConsumer, self).__init__(connection=connection,
                                               topic=topic)
+        self.register_callback(self.process_data)

-    def receive(self, *args, **kwargs):
-        self.pool.spawn_n(self._receive, *args, **kwargs)
-
-    @exception.wrap_exception
-    def _receive(self, message_data, message):
-        """Magically looks for a method on the proxy object and calls it.
+    def process_data(self, message_data, message):
+        """Consumer callback to call a method on a proxy object.
+
+        Parses the message for validity and fires off a thread to call the
+        proxy object method.

         Message data should be a dictionary with two keys:
             method: string representing the method to call
@@ -175,8 +199,8 @@ class AdapterConsumer(Consumer):
         """
         LOG.debug(_('received %s') % message_data)
-        msg_id = message_data.pop('_msg_id', None)
+        # This will be popped off in _unpack_context
+        msg_id = message_data.get('_msg_id', None)
         ctxt = _unpack_context(message_data)

         method = message_data.get('method')
@@ -188,8 +212,17 @@ class AdapterConsumer(Consumer):
             # we just log the message and send an error string
             # back to the caller
             LOG.warn(_('no method for message: %s') % message_data)
-            msg_reply(msg_id, _('No method for message: %s') % message_data)
+            if msg_id:
+                msg_reply(msg_id,
+                          _('No method for message: %s') % message_data)
             return
+        self.pool.spawn_n(self._process_data, msg_id, ctxt, method, args)
+
+    @exception.wrap_exception
+    def _process_data(self, msg_id, ctxt, method, args):
+        """Thread that maigcally looks for a method on the proxy
+        object and calls it.
+        """
+
         node_func = getattr(self.proxy, str(method))
         node_args = dict((str(k), v) for k, v in args.iteritems())
@@ -197,7 +230,18 @@ class AdapterConsumer(Consumer):
         try:
             rval = node_func(context=ctxt, **node_args)
             if msg_id:
-                msg_reply(msg_id, rval, None)
+                # Check if the result was a generator
+                if isinstance(rval, types.GeneratorType):
+                    for x in rval:
+                        msg_reply(msg_id, x, None)
+                else:
+                    msg_reply(msg_id, rval, None)
+
+                # This final None tells multicall that it is done.
+                msg_reply(msg_id, None, None)
+            elif isinstance(rval, types.GeneratorType):
+                # NOTE(vish): this iterates through the generator
+                list(rval)
         except Exception as e:
             logging.exception('Exception during message handling')
             if msg_id:
@@ -205,11 +249,6 @@ class AdapterConsumer(Consumer):
             return

-class Publisher(messaging.Publisher):
-    """Publisher base class."""
-    pass
-
 class TopicAdapterConsumer(AdapterConsumer):
     """Consumes messages on a specific topic."""
@@ -242,6 +281,58 @@ class FanoutAdapterConsumer(AdapterConsumer):
                 topic=topic, proxy=proxy)

+class ConsumerSet(object):
+    """Groups consumers to listen on together on a single connection."""
+
+    def __init__(self, connection, consumer_list):
+        self.consumer_list = set(consumer_list)
+        self.consumer_set = None
+        self.enabled = True
+        self.init(connection)
+
+    def init(self, conn):
+        if not conn:
+            conn = Connection.instance(new=True)
+        if self.consumer_set:
+            self.consumer_set.close()
+        self.consumer_set = messaging.ConsumerSet(conn)
+        for consumer in self.consumer_list:
+            consumer.connection = conn
+            # consumer.backend is set for us
+            self.consumer_set.add_consumer(consumer)
+
+    def reconnect(self):
+        self.init(None)
+
+    def wait(self, limit=None):
+        running = True
+        while running:
+            it = self.consumer_set.iterconsume(limit=limit)
+            if not it:
+                break
+            while True:
+                try:
+                    it.next()
+                except StopIteration:
+                    return
+                except greenlet.GreenletExit:
+                    running = False
+                    break
+                except Exception as e:
+                    LOG.exception(_("Exception while processing consumer"))
+                    self.reconnect()
+                    # Break to outer loop
+                    break
+
+    def close(self):
+        self.consumer_set.close()
+
+
+class Publisher(messaging.Publisher):
+    """Publisher base class."""
+    pass
+
 class TopicPublisher(Publisher):
     """Publishes messages on a specific topic."""
@@ -306,16 +397,18 @@ def msg_reply(msg_id, reply=None, failure=None):
         LOG.error(_("Returning exception %s to caller"), message)
         LOG.error(tb)
         failure = (failure[0].__name__, str(failure[1]), tb)
-    conn = Connection.instance()
-    publisher = DirectPublisher(connection=conn, msg_id=msg_id)
-    try:
-        publisher.send({'result': reply, 'failure': failure})
-    except TypeError:
-        publisher.send(
-                {'result': dict((k, repr(v))
-                                for k, v in reply.__dict__.iteritems()),
-                 'failure': failure})
-    publisher.close()
+    with ConnectionPool.item() as conn:
+        publisher = DirectPublisher(connection=conn, msg_id=msg_id)
+        try:
+            publisher.send({'result': reply, 'failure': failure})
+        except TypeError:
+            publisher.send(
+                    {'result': dict((k, repr(v))
+                                    for k, v in reply.__dict__.iteritems()),
+                     'failure': failure})
+
+        publisher.close()
@@ -347,8 +440,9 @@ def _unpack_context(msg):
         if key.startswith('_context_'):
             value = msg.pop(key)
             context_dict[key[9:]] = value
+    context_dict['msg_id'] = msg.pop('_msg_id', None)
     LOG.debug(_('unpacked context: %s'), context_dict)
-    return context.RequestContext.from_dict(context_dict)
+    return RpcContext.from_dict(context_dict)
@@ -360,70 +454,112 @@ def _pack_context(msg, context):
     """
-    context = dict([('_context_%s' % key, value)
-                    for (key, value) in context.to_dict().iteritems()])
-    msg.update(context)
+    context_d = dict([('_context_%s' % key, value)
+                      for (key, value) in context.to_dict().iteritems()])
+    msg.update(context_d)

-def call(context, topic, msg):
-    """Sends a message on a topic and wait for a response."""
+class RpcContext(context.RequestContext):
+    def __init__(self, *args, **kwargs):
+        msg_id = kwargs.pop('msg_id', None)
+        self.msg_id = msg_id
+        super(RpcContext, self).__init__(*args, **kwargs)
+
+    def reply(self, *args, **kwargs):
+        msg_reply(self.msg_id, *args, **kwargs)
+
+
+def multicall(context, topic, msg):
+    """Make a call that returns multiple times."""
     LOG.debug(_('Making asynchronous call on %s ...'), topic)
     msg_id = uuid.uuid4().hex
     msg.update({'_msg_id': msg_id})
     LOG.debug(_('MSG_ID is %s') % (msg_id))
     _pack_context(msg, context)

-    class WaitMessage(object):
-        def __call__(self, data, message):
-            """Acks message and sets result."""
-            message.ack()
-            if data['failure']:
-                self.result = RemoteError(*data['failure'])
-            else:
-                self.result = data['result']
-
-    wait_msg = WaitMessage()
-    conn = Connection.instance()
-    consumer = DirectConsumer(connection=conn, msg_id=msg_id)
+    con_conn = ConnectionPool.get()
+    consumer = DirectConsumer(connection=con_conn, msg_id=msg_id)
+    wait_msg = MulticallWaiter(consumer)
     consumer.register_callback(wait_msg)

-    conn = Connection.instance()
-    publisher = TopicPublisher(connection=conn, topic=topic)
+    publisher = TopicPublisher(connection=con_conn, topic=topic)
     publisher.send(msg)
     publisher.close()

-    try:
-        consumer.wait(limit=1)
-    except StopIteration:
-        pass
-    consumer.close()
-    # NOTE(termie): this is a little bit of a change from the original
-    #               non-eventlet code where returning a Failure
-    #               instance from a deferred call is very similar to
-    #               raising an exception
-    if isinstance(wait_msg.result, Exception):
-        raise wait_msg.result
-    return wait_msg.result
+    return wait_msg
+
+
+class MulticallWaiter(object):
+    def __init__(self, consumer):
+        self._consumer = consumer
+        self._results = queue.Queue()
+        self._closed = False
+
+    def close(self):
+        self._closed = True
+        self._consumer.close()
+        ConnectionPool.put(self._consumer.connection)
+
+    def __call__(self, data, message):
+        """Acks message and sets result."""
+        message.ack()
+        if data['failure']:
+            self._results.put(RemoteError(*data['failure']))
+        else:
+            self._results.put(data['result'])
+
+    def __iter__(self):
+        return self.wait()
+
+    def wait(self):
+        while True:
+            rv = None
+            while rv is None and not self._closed:
+                try:
+                    rv = self._consumer.fetch(enable_callbacks=True)
+                except Exception:
+                    self.close()
+                    raise
+                time.sleep(0.01)
+
+            result = self._results.get()
+            if isinstance(result, Exception):
+                self.close()
+                raise result
+            if result == None:
+                self.close()
+                raise StopIteration
+            yield result
+
+
+def call(context, topic, msg):
+    """Sends a message on a topic and wait for a response."""
+    rv = multicall(context, topic, msg)
+    # NOTE(vish): return the last result from the multicall
+    rv = list(rv)
+    if not rv:
+        return
+    return rv[-1]

 def cast(context, topic, msg):
     """Sends a message on a topic without waiting for a response."""
     LOG.debug(_('Making asynchronous cast on %s...'), topic)
     _pack_context(msg, context)
-    conn = Connection.instance()
-    publisher = TopicPublisher(connection=conn, topic=topic)
-    publisher.send(msg)
-    publisher.close()
+    with ConnectionPool.item() as conn:
+        publisher = TopicPublisher(connection=conn, topic=topic)
+        publisher.send(msg)
+        publisher.close()

 def fanout_cast(context, topic, msg):
     """Sends a message on a fanout exchange without waiting for a response."""
     LOG.debug(_('Making asynchronous fanout cast...'))
     _pack_context(msg, context)
-    conn = Connection.instance()
-    publisher = FanoutPublisher(topic, connection=conn)
-    publisher.send(msg)
-    publisher.close()
+    with ConnectionPool.item() as conn:
+        publisher = FanoutPublisher(topic, connection=conn)
+        publisher.send(msg)
+        publisher.close()

 def generic_response(message_data, message):
@@ -459,6 +595,7 @@ def send_message(topic, message, wait=True):
     if wait:
         consumer.wait()
+    consumer.close()

 if __name__ == '__main__':
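On the calling side, multicall() now returns an iterator that yields each msg_reply() a remote generator produces until the final None sentinel arrives, while call() keeps the old single-result semantics by draining the iterator and returning the last value. A hedged sketch of a caller, assuming a Nova tree of this era with a working fake or RabbitMQ backend; the topic and method names below are made up:

    from nova import context
    from nova import rpc

    ctxt = context.get_admin_context()

    # Stream results as the remote side yields them.
    for item in rpc.multicall(ctxt, 'volume',
                              {'method': 'list_volumes', 'args': {}}):
        print(item)

    # Or take only the last result, as before.
    last = rpc.call(ctxt, 'volume', {'method': 'list_volumes', 'args': {}})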
@@ -21,24 +21,24 @@ from nova import flags
 FLAGS = flags.FLAGS

 flags.DECLARE('volume_driver', 'nova.volume.manager')
-FLAGS.volume_driver = 'nova.volume.driver.FakeISCSIDriver'
-FLAGS.connection_type = 'fake'
-FLAGS.fake_rabbit = True
+FLAGS['volume_driver'].SetDefault('nova.volume.driver.FakeISCSIDriver')
+FLAGS['connection_type'].SetDefault('fake')
+FLAGS['fake_rabbit'].SetDefault(True)
 flags.DECLARE('auth_driver', 'nova.auth.manager')
-FLAGS.auth_driver = 'nova.auth.dbdriver.DbDriver'
+FLAGS['auth_driver'].SetDefault('nova.auth.dbdriver.DbDriver')
 flags.DECLARE('network_size', 'nova.network.manager')
 flags.DECLARE('num_networks', 'nova.network.manager')
 flags.DECLARE('fake_network', 'nova.network.manager')
-FLAGS.network_size = 8
-FLAGS.num_networks = 2
-FLAGS.fake_network = True
-FLAGS.image_service = 'nova.image.local.LocalImageService'
+FLAGS['network_size'].SetDefault(8)
+FLAGS['num_networks'].SetDefault(2)
+FLAGS['fake_network'].SetDefault(True)
+FLAGS['image_service'].SetDefault('nova.image.local.LocalImageService')
 flags.DECLARE('num_shelves', 'nova.volume.driver')
 flags.DECLARE('blades_per_shelf', 'nova.volume.driver')
 flags.DECLARE('iscsi_num_targets', 'nova.volume.driver')
-FLAGS.num_shelves = 2
-FLAGS.blades_per_shelf = 4
-FLAGS.iscsi_num_targets = 8
-FLAGS.verbose = True
-FLAGS.sqlite_db = "tests.sqlite"
-FLAGS.use_ipv6 = True
+FLAGS['num_shelves'].SetDefault(2)
+FLAGS['blades_per_shelf'].SetDefault(4)
+FLAGS['iscsi_num_targets'].SetDefault(8)
+FLAGS['verbose'].SetDefault(True)
+FLAGS['sqlite_db'].SetDefault("tests.sqlite")
+FLAGS['use_ipv6'].SetDefault(True)
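SetDefault() changes only a flag's default, so a value given on the command line still wins, whereas the old attribute assignment silently overrode whatever had been parsed. A standalone sketch of the difference, assuming the python-gflags package is installed:

    import gflags

    FLAGS = gflags.FLAGS
    gflags.DEFINE_integer('network_size', 256, 'addresses per network')

    # Test-style override of the default only:
    FLAGS['network_size'].SetDefault(8)

    FLAGS(['prog'])                        # parse with no arguments
    assert FLAGS.network_size == 8

    FLAGS(['prog', '--network_size=16'])   # an explicit flag still wins
    assert FLAGS.network_size == 16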
nova/tests/public_key/dummy.fingerprint (new file, 1 line)
@@ -0,0 +1 @@
+1c:87:d1:d9:32:fd:62:3c:78:2b:c0:ad:c0:15:88:df

nova/tests/public_key/dummy.pub (new file, 1 line)
@@ -0,0 +1 @@
+ssh-dss AAAAB3NzaC1kc3MAAACBAMGJlY9XEIm2X234pdO5yFWMp2JuOQx8U0E815IVXhmKxYCBK9ZakgZOIQmPbXoGYyV+mziDPp6HJ0wKYLQxkwLEFr51fAZjWQvRss0SinURRuLkockDfGFtD4pYJthekr/rlqMKlBSDUSpGq8jUWW60UJ18FGooFpxR7ESqQRx/AAAAFQC96LRglaUeeP+E8U/yblEJocuiWwAAAIA3XiMR8Skiz/0aBm5K50SeQznQuMJTyzt9S9uaz5QZWiFu69hOyGSFGw8fqgxEkXFJIuHobQQpGYQubLW0NdaYRqyE/Vud3JUJUb8Texld6dz8vGemyB5d1YvtSeHIo8/BGv2msOqR3u5AZTaGCBD9DhpSGOKHEdNjTtvpPd8S8gAAAIBociGZ5jf09iHLVENhyXujJbxfGRPsyNTyARJfCOGl0oFV6hEzcQyw8U/ePwjgvjc2UizMWLl8tsb2FXKHRdc2v+ND3Us+XqKQ33X3ADP4FZ/+Oj213gMyhCmvFTP0u5FmHog9My4CB7YcIWRuUR42WlhQ2IfPvKwUoTk3R+T6Og== www-data@mk
@@ -224,6 +224,29 @@ class ApiEc2TestCase(test.TestCase):
         self.manager.delete_project(project)
         self.manager.delete_user(user)

+    def test_create_duplicate_key_pair(self):
+        """Test that, after successfully generating a keypair,
+        requesting a second keypair with the same name fails sanely"""
+        self.expect_http()
+        self.mox.ReplayAll()
+        keyname = "".join(random.choice("sdiuisudfsdcnpaqwertasd") \
+            for x in range(random.randint(4, 8)))
+        user = self.manager.create_user('fake', 'fake', 'fake')
+        project = self.manager.create_project('fake', 'fake', 'fake')
+        # NOTE(vish): create depends on pool, so call helper directly
+        self.ec2.create_key_pair('test')
+
+        try:
+            self.ec2.create_key_pair('test')
+        except EC2ResponseError, e:
+            if e.code == 'KeyPairExists':
+                pass
+            else:
+                self.fail("Unexpected EC2ResponseError: %s "
+                          "(expected KeyPairExists)" % e.code)
+        else:
+            self.fail('Exception not raised.')
+
     def test_get_all_security_groups(self):
         """Test that we can retrieve security groups"""
         self.expect_http()
@@ -17,13 +17,9 @@
 # under the License.

 from base64 import b64decode
-import json
 from M2Crypto import BIO
 from M2Crypto import RSA
 import os
-import shutil
-import tempfile
-import time

 from eventlet import greenthread

@@ -33,12 +29,10 @@ from nova import db
 from nova import flags
 from nova import log as logging
 from nova import rpc
-from nova import service
 from nova import test
 from nova import utils
 from nova import exception
 from nova.auth import manager
-from nova.compute import power_state
 from nova.api.ec2 import cloud
 from nova.api.ec2 import ec2utils
 from nova.image import local
@@ -79,14 +73,21 @@ class CloudTestCase(test.TestCase):
         self.stubs.Set(local.LocalImageService, 'show', fake_show)
         self.stubs.Set(local.LocalImageService, 'show_by_name', fake_show)

+        # NOTE(vish): set up a manual wait so rpc.cast has a chance to finish
+        rpc_cast = rpc.cast
+
+        def finish_cast(*args, **kwargs):
+            rpc_cast(*args, **kwargs)
+            greenthread.sleep(0.2)
+
+        self.stubs.Set(rpc, 'cast', finish_cast)
+
     def tearDown(self):
         network_ref = db.project_get_network(self.context,
                                              self.project.id)
         db.network_disassociate(self.context, network_ref['id'])
         self.manager.delete_project(self.project)
         self.manager.delete_user(self.user)
-        self.compute.kill()
-        self.network.kill()
         super(CloudTestCase, self).tearDown()

     def _create_key(self, name):
@@ -113,7 +114,6 @@ class CloudTestCase(test.TestCase):
         self.cloud.describe_addresses(self.context)
         self.cloud.release_address(self.context,
                                    public_ip=address)
-        greenthread.sleep(0.3)
         db.floating_ip_destroy(self.context, address)

     def test_associate_disassociate_address(self):
@@ -129,12 +129,10 @@ class CloudTestCase(test.TestCase):
         self.cloud.associate_address(self.context,
                                      instance_id=ec2_id,
                                      public_ip=address)
-        greenthread.sleep(0.3)
         self.cloud.disassociate_address(self.context,
                                         public_ip=address)
         self.cloud.release_address(self.context,
                                    public_ip=address)
-        greenthread.sleep(0.3)
         self.network.deallocate_fixed_ip(self.context, fixed)
         db.instance_destroy(self.context, inst['id'])
         db.floating_ip_destroy(self.context, address)
@@ -171,6 +169,25 @@ class CloudTestCase(test.TestCase):
         db.volume_destroy(self.context, vol1['id'])
         db.volume_destroy(self.context, vol2['id'])

+    def test_create_volume_from_snapshot(self):
+        """Makes sure create_volume works when we specify a snapshot."""
+        vol = db.volume_create(self.context, {'size': 1})
+        snap = db.snapshot_create(self.context, {'volume_id': vol['id'],
+                                                 'volume_size': vol['size'],
+                                                 'status': "available"})
+        snapshot_id = ec2utils.id_to_ec2_id(snap['id'], 'snap-%08x')
+
+        result = self.cloud.create_volume(self.context,
+                                          snapshot_id=snapshot_id)
+        volume_id = result['volumeId']
+        result = self.cloud.describe_volumes(self.context)
+        self.assertEqual(len(result['volumeSet']), 2)
+        self.assertEqual(result['volumeSet'][1]['volumeId'], volume_id)
+
+        db.volume_destroy(self.context, ec2utils.ec2_id_to_id(volume_id))
+        db.snapshot_destroy(self.context, snap['id'])
+        db.volume_destroy(self.context, vol['id'])
+
     def test_describe_availability_zones(self):
         """Makes sure describe_availability_zones works and filters results."""
         service1 = db.service_create(self.context, {'host': 'host1_zones',
@@ -188,6 +205,52 @@ class CloudTestCase(test.TestCase):
         db.service_destroy(self.context, service1['id'])
         db.service_destroy(self.context, service2['id'])

+    def test_describe_snapshots(self):
+        """Makes sure describe_snapshots works and filters results."""
+        vol = db.volume_create(self.context, {})
+        snap1 = db.snapshot_create(self.context, {'volume_id': vol['id']})
+        snap2 = db.snapshot_create(self.context, {'volume_id': vol['id']})
+        result = self.cloud.describe_snapshots(self.context)
+        self.assertEqual(len(result['snapshotSet']), 2)
+        snapshot_id = ec2utils.id_to_ec2_id(snap2['id'], 'snap-%08x')
+        result = self.cloud.describe_snapshots(self.context,
+                                               snapshot_id=[snapshot_id])
+        self.assertEqual(len(result['snapshotSet']), 1)
+        self.assertEqual(
+            ec2utils.ec2_id_to_id(result['snapshotSet'][0]['snapshotId']),
+            snap2['id'])
+        db.snapshot_destroy(self.context, snap1['id'])
+        db.snapshot_destroy(self.context, snap2['id'])
+        db.volume_destroy(self.context, vol['id'])
+
+    def test_create_snapshot(self):
+        """Makes sure create_snapshot works."""
+        vol = db.volume_create(self.context, {'status': "available"})
+        volume_id = ec2utils.id_to_ec2_id(vol['id'], 'vol-%08x')
+
+        result = self.cloud.create_snapshot(self.context,
+                                            volume_id=volume_id)
+        snapshot_id = result['snapshotId']
+        result = self.cloud.describe_snapshots(self.context)
+        self.assertEqual(len(result['snapshotSet']), 1)
+        self.assertEqual(result['snapshotSet'][0]['snapshotId'], snapshot_id)
+
+        db.snapshot_destroy(self.context, ec2utils.ec2_id_to_id(snapshot_id))
+        db.volume_destroy(self.context, vol['id'])
+
+    def test_delete_snapshot(self):
+        """Makes sure delete_snapshot works."""
+        vol = db.volume_create(self.context, {'status': "available"})
+        snap = db.snapshot_create(self.context, {'volume_id': vol['id'],
+                                                 'status': "available"})
+        snapshot_id = ec2utils.id_to_ec2_id(snap['id'], 'snap-%08x')
+
+        result = self.cloud.delete_snapshot(self.context,
+                                            snapshot_id=snapshot_id)
+        self.assertTrue(result)
+
+        db.volume_destroy(self.context, vol['id'])
+
     def test_describe_instances(self):
         """Makes sure describe_instances works and filters results."""
         inst1 = db.instance_create(self.context, {'reservation_id': 'a',
@@ -306,31 +369,25 @@ class CloudTestCase(test.TestCase):
                   'instance_type': instance_type,
                   'max_count': max_count}
         rv = self.cloud.run_instances(self.context, **kwargs)
-        greenthread.sleep(0.3)
         instance_id = rv['instancesSet'][0]['instanceId']
         output = self.cloud.get_console_output(context=self.context,
                                                instance_id=[instance_id])
         self.assertEquals(b64decode(output['output']), 'FAKE CONSOLE?OUTPUT')
         # TODO(soren): We need this until we can stop polling in the rpc code
         # for unit tests.
-        greenthread.sleep(0.3)
         rv = self.cloud.terminate_instances(self.context, [instance_id])
-        greenthread.sleep(0.3)

     def test_ajax_console(self):
         kwargs = {'image_id': 'ami-1'}
         rv = self.cloud.run_instances(self.context, **kwargs)
         instance_id = rv['instancesSet'][0]['instanceId']
-        greenthread.sleep(0.3)
         output = self.cloud.get_ajax_console(context=self.context,
                                              instance_id=[instance_id])
         self.assertEquals(output['url'],
                           '%s/?token=FAKETOKEN' % FLAGS.ajax_console_proxy_url)
         # TODO(soren): We need this until we can stop polling in the rpc code
         # for unit tests.
-        greenthread.sleep(0.3)
         rv = self.cloud.terminate_instances(self.context, [instance_id])
-        greenthread.sleep(0.3)

     def test_key_generation(self):
         result = self._create_key('test')
@@ -354,6 +411,36 @@ class CloudTestCase(test.TestCase):
         self.assertTrue(filter(lambda k: k['keyName'] == 'test1', keys))
         self.assertTrue(filter(lambda k: k['keyName'] == 'test2', keys))

+    def test_import_public_key(self):
+        # test when user provides all values
+        result1 = self.cloud.import_public_key(self.context,
+                                               'testimportkey1',
+                                               'mytestpubkey',
+                                               'mytestfprint')
+        self.assertTrue(result1)
+        keydata = db.key_pair_get(self.context,
+                                  self.context.user.id,
+                                  'testimportkey1')
+        self.assertEqual('mytestpubkey', keydata['public_key'])
+        self.assertEqual('mytestfprint', keydata['fingerprint'])
+        # test when user omits fingerprint
+        pubkey_path = os.path.join(os.path.dirname(__file__), 'public_key')
+        f = open(pubkey_path + '/dummy.pub', 'r')
+        dummypub = f.readline().rstrip()
+        f.close
+        f = open(pubkey_path + '/dummy.fingerprint', 'r')
+        dummyfprint = f.readline().rstrip()
+        f.close
+        result2 = self.cloud.import_public_key(self.context,
|
||||||
|
'testimportkey2',
|
||||||
|
dummypub)
|
||||||
|
self.assertTrue(result2)
|
||||||
|
keydata = db.key_pair_get(self.context,
|
||||||
|
self.context.user.id,
|
||||||
|
'testimportkey2')
|
||||||
|
self.assertEqual(dummypub, keydata['public_key'])
|
||||||
|
self.assertEqual(dummyfprint, keydata['fingerprint'])
|
||||||
|
|
||||||
def test_delete_key_pair(self):
|
def test_delete_key_pair(self):
|
||||||
self._create_key('test')
|
self._create_key('test')
|
||||||
self.cloud.delete_key_pair(self.context, 'test')
|
self.cloud.delete_key_pair(self.context, 'test')
|
||||||
|
|||||||
@@ -334,6 +334,28 @@ class ComputeTestCase(test.TestCase):

         self.compute.terminate_instance(self.context, instance_id)

+    def test_finish_resize(self):
+        """Contrived test to ensure finish_resize doesn't raise anything"""
+
+        def fake(*args, **kwargs):
+            pass
+
+        self.stubs.Set(self.compute.driver, 'finish_resize', fake)
+        context = self.context.elevated()
+        instance_id = self._create_instance()
+        self.compute.prep_resize(context, instance_id, 1)
+        migration_ref = db.migration_get_by_instance_and_status(context,
+                instance_id, 'pre-migrating')
+        try:
+            self.compute.finish_resize(context, instance_id,
+                    int(migration_ref['id']), {})
+        except KeyError, e:
+            # Only catch key errors. We want other reasons for the test to
+            # fail to actually error out so we don't obscure anything
+            self.fail()
+
+        self.compute.terminate_instance(self.context, instance_id)
+
     def test_resize_instance(self):
         """Ensure instance can be migrated/resized"""
         instance_id = self._create_instance()
@@ -91,6 +91,20 @@ class FlagsTestCase(test.TestCase):
         self.assert_('runtime_answer' in self.global_FLAGS)
         self.assertEqual(self.global_FLAGS.runtime_answer, 60)

+    def test_long_vs_short_flags(self):
+        flags.DEFINE_string('duplicate_answer_long', 'val', 'desc',
+                            flag_values=self.global_FLAGS)
+        argv = ['flags_test', '--duplicate_answer=60', 'extra_arg']
+        args = self.global_FLAGS(argv)
+
+        self.assert_('duplicate_answer' not in self.global_FLAGS)
+        self.assert_(self.global_FLAGS.duplicate_answer_long, 60)
+
+        flags.DEFINE_integer('duplicate_answer', 60, 'desc',
+                             flag_values=self.global_FLAGS)
+        self.assertEqual(self.global_FLAGS.duplicate_answer, 60)
+        self.assertEqual(self.global_FLAGS.duplicate_answer_long, 'val')
+
     def test_flag_leak_left(self):
         self.assertEqual(FLAGS.flags_unittest, 'foo')
         FLAGS.flags_unittest = 'bar'
@@ -32,7 +32,8 @@ from nova import utils
 from nova.api.ec2 import cloud
 from nova.auth import manager
 from nova.compute import power_state
-from nova.virt import libvirt_conn
+from nova.virt.libvirt import connection
+from nova.virt.libvirt import firewall

 libvirt = None
 FLAGS = flags.FLAGS
@@ -83,7 +84,7 @@ class CacheConcurrencyTestCase(test.TestCase):

     def test_same_fname_concurrency(self):
         """Ensures that the same fname cache runs at a sequentially"""
-        conn = libvirt_conn.LibvirtConnection
+        conn = connection.LibvirtConnection
         wait1 = eventlet.event.Event()
         done1 = eventlet.event.Event()
         eventlet.spawn(conn._cache_image, _concurrency,
@@ -104,7 +105,7 @@ class CacheConcurrencyTestCase(test.TestCase):

     def test_different_fname_concurrency(self):
         """Ensures that two different fname caches are concurrent"""
-        conn = libvirt_conn.LibvirtConnection
+        conn = connection.LibvirtConnection
         wait1 = eventlet.event.Event()
         done1 = eventlet.event.Event()
         eventlet.spawn(conn._cache_image, _concurrency,
@@ -125,7 +126,7 @@ class CacheConcurrencyTestCase(test.TestCase):
 class LibvirtConnTestCase(test.TestCase):
     def setUp(self):
         super(LibvirtConnTestCase, self).setUp()
-        libvirt_conn._late_load_cheetah()
+        connection._late_load_cheetah()
         self.flags(fake_call=True)
         self.manager = manager.AuthManager()

@@ -171,8 +172,8 @@ class LibvirtConnTestCase(test.TestCase):
             return False
         global libvirt
         libvirt = __import__('libvirt')
-        libvirt_conn.libvirt = __import__('libvirt')
-        libvirt_conn.libxml2 = __import__('libxml2')
+        connection.libvirt = __import__('libvirt')
+        connection.libxml2 = __import__('libxml2')
         return True

     def create_fake_libvirt_mock(self, **kwargs):
@@ -182,7 +183,7 @@ class LibvirtConnTestCase(test.TestCase):
         class FakeLibvirtConnection(object):
             pass

-        # A fake libvirt_conn.IptablesFirewallDriver
+        # A fake connection.IptablesFirewallDriver
         class FakeIptablesFirewallDriver(object):

             def __init__(self, **kwargs):
@@ -198,11 +199,11 @@ class LibvirtConnTestCase(test.TestCase):
         for key, val in kwargs.items():
             fake.__setattr__(key, val)

-        # Inevitable mocks for libvirt_conn.LibvirtConnection
-        self.mox.StubOutWithMock(libvirt_conn.utils, 'import_class')
-        libvirt_conn.utils.import_class(mox.IgnoreArg()).AndReturn(fakeip)
-        self.mox.StubOutWithMock(libvirt_conn.LibvirtConnection, '_conn')
-        libvirt_conn.LibvirtConnection._conn = fake
+        # Inevitable mocks for connection.LibvirtConnection
+        self.mox.StubOutWithMock(connection.utils, 'import_class')
+        connection.utils.import_class(mox.IgnoreArg()).AndReturn(fakeip)
+        self.mox.StubOutWithMock(connection.LibvirtConnection, '_conn')
+        connection.LibvirtConnection._conn = fake

     def create_service(self, **kwargs):
         service_ref = {'host': kwargs.get('host', 'dummy'),
@@ -214,7 +215,7 @@ class LibvirtConnTestCase(test.TestCase):
         return db.service_create(context.get_admin_context(), service_ref)

     def test_preparing_xml_info(self):
-        conn = libvirt_conn.LibvirtConnection(True)
+        conn = connection.LibvirtConnection(True)
         instance_ref = db.instance_create(self.context, self.test_instance)

         result = conn._prepare_xml_info(instance_ref, False)
@@ -229,7 +230,7 @@ class LibvirtConnTestCase(test.TestCase):
         self.assertTrue(len(result['nics']) == 2)

     def test_get_nic_for_xml_v4(self):
-        conn = libvirt_conn.LibvirtConnection(True)
+        conn = connection.LibvirtConnection(True)
         network, mapping = _create_network_info()[0]
         self.flags(use_ipv6=False)
         params = conn._get_nic_for_xml(network, mapping)['extra_params']
@@ -237,7 +238,7 @@ class LibvirtConnTestCase(test.TestCase):
         self.assertTrue(params.find('PROJMASKV6') == -1)

     def test_get_nic_for_xml_v6(self):
-        conn = libvirt_conn.LibvirtConnection(True)
+        conn = connection.LibvirtConnection(True)
         network, mapping = _create_network_info()[0]
         self.flags(use_ipv6=True)
         params = conn._get_nic_for_xml(network, mapping)['extra_params']
@@ -282,7 +283,7 @@ class LibvirtConnTestCase(test.TestCase):
     def test_multi_nic(self):
         instance_data = dict(self.test_instance)
         network_info = _create_network_info(2)
-        conn = libvirt_conn.LibvirtConnection(True)
+        conn = connection.LibvirtConnection(True)
         instance_ref = db.instance_create(self.context, instance_data)
         xml = conn.to_xml(instance_ref, False, network_info)
         tree = xml_to_tree(xml)
@@ -313,7 +314,7 @@ class LibvirtConnTestCase(test.TestCase):
                               'instance_id': instance_ref['id']})

         self.flags(libvirt_type='lxc')
-        conn = libvirt_conn.LibvirtConnection(True)
+        conn = connection.LibvirtConnection(True)

         uri = conn.get_uri()
         self.assertEquals(uri, 'lxc:///')
@@ -419,7 +420,7 @@ class LibvirtConnTestCase(test.TestCase):

         for (libvirt_type, (expected_uri, checks)) in type_uri_map.iteritems():
             FLAGS.libvirt_type = libvirt_type
-            conn = libvirt_conn.LibvirtConnection(True)
+            conn = connection.LibvirtConnection(True)

             uri = conn.get_uri()
             self.assertEquals(uri, expected_uri)
@@ -446,7 +447,7 @@ class LibvirtConnTestCase(test.TestCase):
         FLAGS.libvirt_uri = testuri
         for (libvirt_type, (expected_uri, checks)) in type_uri_map.iteritems():
             FLAGS.libvirt_type = libvirt_type
-            conn = libvirt_conn.LibvirtConnection(True)
+            conn = connection.LibvirtConnection(True)
             uri = conn.get_uri()
             self.assertEquals(uri, testuri)
         db.instance_destroy(user_context, instance_ref['id'])
@@ -470,13 +471,13 @@ class LibvirtConnTestCase(test.TestCase):
         self.create_fake_libvirt_mock(getVersion=getVersion,
                                       getType=getType,
                                       listDomainsID=listDomainsID)
-        self.mox.StubOutWithMock(libvirt_conn.LibvirtConnection,
+        self.mox.StubOutWithMock(connection.LibvirtConnection,
                                  'get_cpu_info')
-        libvirt_conn.LibvirtConnection.get_cpu_info().AndReturn('cpuinfo')
+        connection.LibvirtConnection.get_cpu_info().AndReturn('cpuinfo')

         # Start test
         self.mox.ReplayAll()
-        conn = libvirt_conn.LibvirtConnection(False)
+        conn = connection.LibvirtConnection(False)
         conn.update_available_resource(self.context, 'dummy')
         service_ref = db.service_get(self.context, service_ref['id'])
         compute_node = service_ref['compute_node'][0]
@@ -510,7 +511,7 @@ class LibvirtConnTestCase(test.TestCase):
         self.create_fake_libvirt_mock()

         self.mox.ReplayAll()
-        conn = libvirt_conn.LibvirtConnection(False)
+        conn = connection.LibvirtConnection(False)
         self.assertRaises(exception.ComputeServiceUnavailable,
                           conn.update_available_resource,
                           self.context, 'dummy')
@@ -545,7 +546,7 @@ class LibvirtConnTestCase(test.TestCase):
         # Start test
         self.mox.ReplayAll()
         try:
-            conn = libvirt_conn.LibvirtConnection(False)
+            conn = connection.LibvirtConnection(False)
             conn.firewall_driver.setattr('setup_basic_filtering', fake_none)
             conn.firewall_driver.setattr('prepare_instance_filter', fake_none)
             conn.firewall_driver.setattr('instance_filter_exists', fake_none)
@@ -594,7 +595,7 @@ class LibvirtConnTestCase(test.TestCase):

         # Start test
         self.mox.ReplayAll()
-        conn = libvirt_conn.LibvirtConnection(False)
+        conn = connection.LibvirtConnection(False)
         self.assertRaises(libvirt.libvirtError,
                           conn._live_migration,
                           self.context, instance_ref, 'dest', '',
@@ -623,7 +624,7 @@ class LibvirtConnTestCase(test.TestCase):

         # Start test
         self.mox.ReplayAll()
-        conn = libvirt_conn.LibvirtConnection(False)
+        conn = connection.LibvirtConnection(False)
         conn.firewall_driver.setattr('setup_basic_filtering', fake_none)
         conn.firewall_driver.setattr('prepare_instance_filter', fake_none)

@@ -647,7 +648,7 @@ class LibvirtConnTestCase(test.TestCase):
         self.assertTrue(count)

     def test_get_host_ip_addr(self):
-        conn = libvirt_conn.LibvirtConnection(False)
+        conn = connection.LibvirtConnection(False)
         ip = conn.get_host_ip_addr()
         self.assertEquals(ip, FLAGS.my_ip)

@@ -671,7 +672,7 @@ class IptablesFirewallTestCase(test.TestCase):
         class FakeLibvirtConnection(object):
             pass
         self.fake_libvirt_connection = FakeLibvirtConnection()
-        self.fw = libvirt_conn.IptablesFirewallDriver(
+        self.fw = firewall.IptablesFirewallDriver(
             get_connection=lambda: self.fake_libvirt_connection)

     def tearDown(self):
@@ -849,7 +850,7 @@ class IptablesFirewallTestCase(test.TestCase):
         self.assertEquals(len(rulesv4), 2)
         self.assertEquals(len(rulesv6), 0)

-    def multinic_iptables_test(self):
+    def test_multinic_iptables(self):
         ipv4_rules_per_network = 2
         ipv6_rules_per_network = 3
         networks_count = 5
@@ -869,6 +870,16 @@ class IptablesFirewallTestCase(test.TestCase):
         self.assertEquals(ipv6_network_rules,
                           ipv6_rules_per_network * networks_count)

+    def test_do_refresh_security_group_rules(self):
+        instance_ref = self._create_instance_ref()
+        self.mox.StubOutWithMock(self.fw,
+                                 'add_filters_for_instance',
+                                 use_mock_anything=True)
+        self.fw.add_filters_for_instance(instance_ref, mox.IgnoreArg())
+        self.fw.instances[instance_ref['id']] = instance_ref
+        self.mox.ReplayAll()
+        self.fw.do_refresh_security_group_rules("fake")
+
+
 class NWFilterTestCase(test.TestCase):
     def setUp(self):
@@ -885,7 +896,7 @@ class NWFilterTestCase(test.TestCase):

         self.fake_libvirt_connection = Mock()

-        self.fw = libvirt_conn.NWFilterFirewall(
+        self.fw = firewall.NWFilterFirewall(
             lambda: self.fake_libvirt_connection)

     def tearDown(self):
nova/tests/test_notifier.py (new file, 117 lines)
@@ -0,0 +1,117 @@
+# Copyright 2011 OpenStack LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import nova
+
+from nova import context
+from nova import flags
+from nova import rpc
+import nova.notifier.api
+from nova.notifier.api import notify
+from nova.notifier import no_op_notifier
+from nova.notifier import rabbit_notifier
+from nova import test
+
+import stubout
+
+
+class NotifierTestCase(test.TestCase):
+    """Test case for notifications"""
+    def setUp(self):
+        super(NotifierTestCase, self).setUp()
+        self.stubs = stubout.StubOutForTesting()
+
+    def tearDown(self):
+        self.stubs.UnsetAll()
+        super(NotifierTestCase, self).tearDown()
+
+    def test_send_notification(self):
+        self.notify_called = False
+
+        def mock_notify(cls, *args):
+            self.notify_called = True
+
+        self.stubs.Set(nova.notifier.no_op_notifier, 'notify',
+                mock_notify)
+
+        class Mock(object):
+            pass
+        notify('publisher_id', 'event_type',
+                nova.notifier.api.WARN, dict(a=3))
+        self.assertEqual(self.notify_called, True)
+
+    def test_verify_message_format(self):
+        """A test to ensure changing the message format is prohibitively
+        annoying"""
+
+        def message_assert(message):
+            fields = [('publisher_id', 'publisher_id'),
+                      ('event_type', 'event_type'),
+                      ('priority', 'WARN'),
+                      ('payload', dict(a=3))]
+            for k, v in fields:
+                self.assertEqual(message[k], v)
+            self.assertTrue(len(message['message_id']) > 0)
+            self.assertTrue(len(message['timestamp']) > 0)
+
+        self.stubs.Set(nova.notifier.no_op_notifier, 'notify',
+                message_assert)
+        notify('publisher_id', 'event_type',
+                nova.notifier.api.WARN, dict(a=3))
+
+    def test_send_rabbit_notification(self):
+        self.stubs.Set(nova.flags.FLAGS, 'notification_driver',
+                'nova.notifier.rabbit_notifier')
+        self.mock_cast = False
+
+        def mock_cast(cls, *args):
+            self.mock_cast = True
+
+        class Mock(object):
+            pass
+
+        self.stubs.Set(nova.rpc, 'cast', mock_cast)
+        notify('publisher_id', 'event_type',
+                nova.notifier.api.WARN, dict(a=3))
+
+        self.assertEqual(self.mock_cast, True)
+
+    def test_invalid_priority(self):
+        def mock_cast(cls, *args):
+            pass
+
+        class Mock(object):
+            pass
+
+        self.stubs.Set(nova.rpc, 'cast', mock_cast)
+        self.assertRaises(nova.notifier.api.BadPriorityException,
+                notify, 'publisher_id',
+                'event_type', 'not a priority', dict(a=3))
+
+    def test_rabbit_priority_queue(self):
+        self.stubs.Set(nova.flags.FLAGS, 'notification_driver',
+                'nova.notifier.rabbit_notifier')
+        self.stubs.Set(nova.flags.FLAGS, 'notification_topic',
+                'testnotify')
+
+        self.test_topic = None
+
+        def mock_cast(context, topic, msg):
+            self.test_topic = topic
+
+        self.stubs.Set(nova.rpc, 'cast', mock_cast)
+        notify('publisher_id',
+                'event_type', 'DEBUG', dict(a=3))
+        self.assertEqual(self.test_topic, 'testnotify.debug')
@@ -31,7 +31,6 @@ LOG = logging.getLogger('nova.tests.rpc')


 class RpcTestCase(test.TestCase):
-    """Test cases for rpc"""
     def setUp(self):
         super(RpcTestCase, self).setUp()
         self.conn = rpc.Connection.instance(True)
@@ -43,14 +42,55 @@ class RpcTestCase(test.TestCase):
         self.context = context.get_admin_context()

     def test_call_succeed(self):
-        """Get a value through rpc call"""
         value = 42
         result = rpc.call(self.context, 'test', {"method": "echo",
                                                  "args": {"value": value}})
         self.assertEqual(value, result)

+    def test_call_succeed_despite_multiple_returns(self):
+        value = 42
+        result = rpc.call(self.context, 'test', {"method": "echo_three_times",
+                                                 "args": {"value": value}})
+        self.assertEqual(value + 2, result)
+
+    def test_call_succeed_despite_multiple_returns_yield(self):
+        value = 42
+        result = rpc.call(self.context, 'test',
+                          {"method": "echo_three_times_yield",
+                           "args": {"value": value}})
+        self.assertEqual(value + 2, result)
+
+    def test_multicall_succeed_once(self):
+        value = 42
+        result = rpc.multicall(self.context,
+                               'test',
+                               {"method": "echo",
+                                "args": {"value": value}})
+        for i, x in enumerate(result):
+            if i > 0:
+                self.fail('should only receive one response')
+            self.assertEqual(value + i, x)
+
+    def test_multicall_succeed_three_times(self):
+        value = 42
+        result = rpc.multicall(self.context,
+                               'test',
+                               {"method": "echo_three_times",
+                                "args": {"value": value}})
+        for i, x in enumerate(result):
+            self.assertEqual(value + i, x)
+
+    def test_multicall_succeed_three_times_yield(self):
+        value = 42
+        result = rpc.multicall(self.context,
+                               'test',
+                               {"method": "echo_three_times_yield",
+                                "args": {"value": value}})
+        for i, x in enumerate(result):
+            self.assertEqual(value + i, x)
+
     def test_context_passed(self):
-        """Makes sure a context is passed through rpc call"""
+        """Makes sure a context is passed through rpc call."""
         value = 42
         result = rpc.call(self.context,
                           'test', {"method": "context",
@@ -58,11 +98,12 @@ class RpcTestCase(test.TestCase):
         self.assertEqual(self.context.to_dict(), result)

     def test_call_exception(self):
-        """Test that exception gets passed back properly
+        """Test that exception gets passed back properly.

         rpc.call returns a RemoteError object.  The value of the
         exception is converted to a string, so we convert it back
         to an int in the test.
+
         """
         value = 42
         self.assertRaises(rpc.RemoteError,
@@ -81,7 +122,7 @@ class RpcTestCase(test.TestCase):
         self.assertEqual(int(exc.value), value)

     def test_nested_calls(self):
-        """Test that we can do an rpc.call inside another call"""
+        """Test that we can do an rpc.call inside another call."""
         class Nested(object):
             @staticmethod
             def echo(context, queue, value):
@@ -108,25 +149,80 @@ class RpcTestCase(test.TestCase):
                                               "value": value}})
         self.assertEqual(value, result)

+    def test_connectionpool_single(self):
+        """Test that ConnectionPool recycles a single connection."""
+        conn1 = rpc.ConnectionPool.get()
+        rpc.ConnectionPool.put(conn1)
+        conn2 = rpc.ConnectionPool.get()
+        rpc.ConnectionPool.put(conn2)
+        self.assertEqual(conn1, conn2)
+
+    def test_connectionpool_double(self):
+        """Test that ConnectionPool returns and reuses separate connections.
+
+        When called consecutively we should get separate connections and upon
+        returning them those connections should be reused for future calls
+        before generating a new connection.
+
+        """
+        conn1 = rpc.ConnectionPool.get()
+        conn2 = rpc.ConnectionPool.get()
+
+        self.assertNotEqual(conn1, conn2)
+        rpc.ConnectionPool.put(conn1)
+        rpc.ConnectionPool.put(conn2)
+
+        conn3 = rpc.ConnectionPool.get()
+        conn4 = rpc.ConnectionPool.get()
+        self.assertEqual(conn1, conn3)
+        self.assertEqual(conn2, conn4)
+
+    def test_connectionpool_limit(self):
+        """Test connection pool limit and connection uniqueness."""
+        max_size = FLAGS.rpc_conn_pool_size
+        conns = []
+
+        for i in xrange(max_size):
+            conns.append(rpc.ConnectionPool.get())
+
+        self.assertFalse(rpc.ConnectionPool.free_items)
+        self.assertEqual(rpc.ConnectionPool.current_size,
+                         rpc.ConnectionPool.max_size)
+        self.assertEqual(len(set(conns)), max_size)
+

 class TestReceiver(object):
-    """Simple Proxy class so the consumer has methods to call
+    """Simple Proxy class so the consumer has methods to call.

-    Uses static methods because we aren't actually storing any state"""
+    Uses static methods because we aren't actually storing any state.
+
+    """

     @staticmethod
     def echo(context, value):
-        """Simply returns whatever value is sent in"""
+        """Simply returns whatever value is sent in."""
         LOG.debug(_("Received %s"), value)
         return value

     @staticmethod
     def context(context, value):
-        """Returns dictionary version of context"""
+        """Returns dictionary version of context."""
         LOG.debug(_("Received %s"), context)
         return context.to_dict()

+    @staticmethod
+    def echo_three_times(context, value):
+        context.reply(value)
+        context.reply(value + 1)
+        context.reply(value + 2)
+
+    @staticmethod
+    def echo_three_times_yield(context, value):
+        yield value
+        yield value + 1
+        yield value + 2
+
     @staticmethod
     def fail(context, value):
-        """Raises an exception with the value sent in"""
+        """Raises an exception with the value sent in."""
         raise Exception(value)
@@ -45,10 +45,11 @@ class VolumeTestCase(test.TestCase):
         self.context = context.get_admin_context()

     @staticmethod
-    def _create_volume(size='0'):
+    def _create_volume(size='0', snapshot_id=None):
         """Create a volume object."""
         vol = {}
         vol['size'] = size
+        vol['snapshot_id'] = snapshot_id
         vol['user_id'] = 'fake'
         vol['project_id'] = 'fake'
         vol['availability_zone'] = FLAGS.storage_availability_zone
@@ -69,6 +70,25 @@ class VolumeTestCase(test.TestCase):
                           self.context,
                           volume_id)

+    def test_create_volume_from_snapshot(self):
+        """Test volume can be created from a snapshot."""
+        volume_src_id = self._create_volume()
+        self.volume.create_volume(self.context, volume_src_id)
+        snapshot_id = self._create_snapshot(volume_src_id)
+        self.volume.create_snapshot(self.context, volume_src_id, snapshot_id)
+        volume_dst_id = self._create_volume(0, snapshot_id)
+        self.volume.create_volume(self.context, volume_dst_id, snapshot_id)
+        self.assertEqual(volume_dst_id, db.volume_get(
+                context.get_admin_context(),
+                volume_dst_id).id)
+        self.assertEqual(snapshot_id, db.volume_get(
+                context.get_admin_context(),
+                volume_dst_id).snapshot_id)
+
+        self.volume.delete_volume(self.context, volume_dst_id)
+        self.volume.delete_snapshot(self.context, snapshot_id)
+        self.volume.delete_volume(self.context, volume_src_id)
+
     def test_too_big_volume(self):
         """Ensure failure if a too large of a volume is requested."""
         # FIXME(vish): validation needs to move into the data layer in
@@ -176,6 +196,34 @@ class VolumeTestCase(test.TestCase):
         # This will allow us to test cross-node interactions
         pass

+    @staticmethod
+    def _create_snapshot(volume_id, size='0'):
+        """Create a snapshot object."""
+        snap = {}
+        snap['volume_size'] = size
+        snap['user_id'] = 'fake'
+        snap['project_id'] = 'fake'
+        snap['volume_id'] = volume_id
+        snap['status'] = "creating"
+        return db.snapshot_create(context.get_admin_context(), snap)['id']
+
+    def test_create_delete_snapshot(self):
+        """Test snapshot can be created and deleted."""
+        volume_id = self._create_volume()
+        self.volume.create_volume(self.context, volume_id)
+        snapshot_id = self._create_snapshot(volume_id)
+        self.volume.create_snapshot(self.context, volume_id, snapshot_id)
+        self.assertEqual(snapshot_id,
+                         db.snapshot_get(context.get_admin_context(),
+                                         snapshot_id).id)
+
+        self.volume.delete_snapshot(self.context, snapshot_id)
+        self.assertRaises(exception.NotFound,
+                          db.snapshot_get,
+                          self.context,
+                          snapshot_id)
+        self.volume.delete_volume(self.context, volume_id)
+
+
 class DriverTestCase(test.TestCase):
     """Base Test class for Drivers."""
@@ -395,6 +395,29 @@ class XenAPIVMTestCase(test.TestCase):
                          os_type="linux")
         self.check_vm_params_for_linux()

+    def test_spawn_vhd_glance_swapdisk(self):
+        # Change the default host_call_plugin to one that'll return
+        # a swap disk
+        orig_func = stubs.FakeSessionForVMTests.host_call_plugin
+
+        stubs.FakeSessionForVMTests.host_call_plugin = \
+                stubs.FakeSessionForVMTests.host_call_plugin_swap
+
+        try:
+            # We'll steal the above glance linux test
+            self.test_spawn_vhd_glance_linux()
+        finally:
+            # Make sure to put this back
+            stubs.FakeSessionForVMTests.host_call_plugin = orig_func
+
+        # We should have 2 VBDs.
+        self.assertEqual(len(self.vm['VBDs']), 2)
+        # Now test that we have 1.
+        self.tearDown()
+        self.setUp()
+        self.test_spawn_vhd_glance_linux()
+        self.assertEqual(len(self.vm['VBDs']), 1)
+
     def test_spawn_vhd_glance_windows(self):
         FLAGS.xenapi_image_service = 'glance'
         self._test_spawn(glance_stubs.FakeGlance.IMAGE_VHD, None, None,
@@ -569,11 +592,29 @@ class XenAPIDiffieHellmanTestCase(test.TestCase):
         bob_shared = self.bob.compute_shared(alice_pub)
         self.assertEquals(alice_shared, bob_shared)

-    def test_encryption(self):
-        msg = "This is a top-secret message"
-        enc = self.alice.encrypt(msg)
+    def _test_encryption(self, message):
+        enc = self.alice.encrypt(message)
+        self.assertFalse(enc.endswith('\n'))
         dec = self.bob.decrypt(enc)
-        self.assertEquals(dec, msg)
+        self.assertEquals(dec, message)
+
+    def test_encrypt_simple_message(self):
+        self._test_encryption('This is a simple message.')
+
+    def test_encrypt_message_with_newlines_at_end(self):
+        self._test_encryption('This message has a newline at the end.\n')
+
+    def test_encrypt_many_newlines_at_end(self):
+        self._test_encryption('Message with lotsa newlines.\n\n\n')
+
+    def test_encrypt_newlines_inside_message(self):
+        self._test_encryption('Message\nwith\ninterior\nnewlines.')
+
+    def test_encrypt_with_leading_newlines(self):
+        self._test_encryption('\n\nMessage with leading newlines.')
+
+    def test_encrypt_really_long_message(self):
+        self._test_encryption(''.join(['abcd' for i in xrange(1024)]))
+
     def tearDown(self):
         super(XenAPIDiffieHellmanTestCase, self).tearDown()
@@ -17,6 +17,7 @@
 """Stubouts, mocks and fixtures for the test suite"""

 import eventlet
+import json
 from nova.virt import xenapi_conn
 from nova.virt.xenapi import fake
 from nova.virt.xenapi import volume_utils
@@ -37,7 +38,7 @@ def stubout_instance_snapshot(stubs):
                               sr_ref=sr_ref, sharable=False)
         vdi_rec = session.get_xenapi().VDI.get_record(vdi_ref)
         vdi_uuid = vdi_rec['uuid']
-        return vdi_uuid
+        return [dict(vdi_type='os', vdi_uuid=vdi_uuid)]

     stubs.Set(vm_utils.VMHelper, 'fetch_image', fake_fetch_image)

@@ -132,11 +133,30 @@ class FakeSessionForVMTests(fake.SessionBase):
     def __init__(self, uri):
         super(FakeSessionForVMTests, self).__init__(uri)

-    def host_call_plugin(self, _1, _2, _3, _4, _5):
+    def host_call_plugin(self, _1, _2, plugin, method, _5):
         sr_ref = fake.get_all('SR')[0]
         vdi_ref = fake.create_vdi('', False, sr_ref, False)
         vdi_rec = fake.get_record('VDI', vdi_ref)
-        return '<string>%s</string>' % vdi_rec['uuid']
+        if plugin == "glance" and method == "download_vhd":
+            ret_str = json.dumps([dict(vdi_type='os',
+                    vdi_uuid=vdi_rec['uuid'])])
+        else:
+            ret_str = vdi_rec['uuid']
+        return '<string>%s</string>' % ret_str
+
+    def host_call_plugin_swap(self, _1, _2, plugin, method, _5):
+        sr_ref = fake.get_all('SR')[0]
+        vdi_ref = fake.create_vdi('', False, sr_ref, False)
+        vdi_rec = fake.get_record('VDI', vdi_ref)
+        if plugin == "glance" and method == "download_vhd":
+            swap_vdi_ref = fake.create_vdi('', False, sr_ref, False)
+            swap_vdi_rec = fake.get_record('VDI', swap_vdi_ref)
+            ret_str = json.dumps(
+                    [dict(vdi_type='os', vdi_uuid=vdi_rec['uuid']),
+                     dict(vdi_type='swap', vdi_uuid=swap_vdi_rec['uuid'])])
+        else:
+            ret_str = vdi_rec['uuid']
+        return '<string>%s</string>' % ret_str

     def VM_start(self, _1, ref, _2, _3):
         vm = fake.get_record('VM', ref)