merge in trunk, resolving conflicts with ttx's branch to switch from using sudo to run_as_root=True

.mailmap | 2

@@ -18,6 +18,8 @@
 <devin.carlen@gmail.com> <devcamcar@illian.local>
 <ewan.mellor@citrix.com> <emellor@silver>
 <itoumsn@nttdata.co.jp> <itoumsn@shayol>
+<jake@ansolabs.com> <jake@markupisart.com>
+<jake@ansolabs.com> <admin@jakedahn.com>
 <jaypipes@gmail.com> <jpipes@serialcoder>
 <jmckenty@gmail.com> <jmckenty@joshua-mckentys-macbook-pro.local>
 <jmckenty@gmail.com> <jmckenty@yyj-dhcp171.corp.flock.com>

Authors | 2

@@ -37,6 +37,7 @@ Hisaharu Ishii <ishii.hisaharu@lab.ntt.co.jp>
 Hisaki Ohara <hisaki.ohara@intel.com>
 Ilya Alekseyev <ilyaalekseyev@acm.org>
 Isaku Yamahata <yamahata@valinux.co.jp>
+Jake Dahn <jake@ansolabs.com>
 Jason Cannavale <jason.cannavale@rackspace.com>
 Jason Koelker <jason@koelker.net>
 Jay Pipes <jaypipes@gmail.com>
@@ -64,6 +65,7 @@ Kirill Shileev <kshileev@gmail.com>
 Koji Iida <iida.koji@lab.ntt.co.jp>
 Lorin Hochstein <lorin@isi.edu>
 Lvov Maxim <usrleon@gmail.com>
+Mandell Degerness <mdegerne@gmail.com>
 Mark Washenberger <mark.washenberger@rackspace.com>
 Masanori Itoh <itoumsn@nttdata.co.jp>
 Matt Dietz <matt.dietz@rackspace.com>

HACKING | 119

@@ -5,12 +5,23 @@ Step 1: Read http://www.python.org/dev/peps/pep-0008/
 Step 2: Read http://www.python.org/dev/peps/pep-0008/ again
 Step 3: Read on

+
+General
+-------
+- Put two newlines between top-level code (funcs, classes, etc)
+- Put one newline between methods in classes and anywhere else
+- Do not write "except:", use "except Exception:" at the very least
+- Include your name with TODOs as in "#TODO(termie)"
+- Do not name anything the same name as a built-in or reserved word
+
+
 Imports
 -------
-- thou shalt not import objects, only modules
-- thou shalt not import more than one module per line
-- thou shalt not make relative imports
-- thou shalt organize your imports according to the following template
+- Do not import objects, only modules
+- Do not import more than one module per line
+- Do not make relative imports
+- Order your imports by the full module path
+- Organize your imports according to the following template

 ::
   # vim: tabstop=4 shiftwidth=4 softtabstop=4
@@ -22,16 +33,6 @@ Imports
   {{begin your code}}


-General
--------
-- thou shalt put two newlines twixt toplevel code (funcs, classes, etc)
-- thou shalt put one newline twixt methods in classes and anywhere else
-- thou shalt not write "except:", use "except Exception:" at the very least
-- thou shalt include your name with TODOs as in "TODO(termie)"
-- thou shalt not name anything the same name as a builtin or reserved word
-- thou shalt not violate causality in our time cone, or else
-
-
 Human Alphabetical Order Examples
 ---------------------------------
 ::
@@ -42,11 +43,13 @@ Human Alphabetical Order Examples
   import time
   import unittest

-  from nova import flags
-  from nova import test
+  import nova.api.ec2
+  from nova.api import openstack
   from nova.auth import users
-  from nova.endpoint import api
+  import nova.flags
   from nova.endpoint import cloud
+  from nova import test
+

 Docstrings
 ----------
@@ -70,6 +73,88 @@ Docstrings

     :param foo: the foo parameter
     :param bar: the bar parameter
+    :returns: return_type -- description of the return value
     :returns: description of the return value
+    :raises: AttributeError, KeyError
+
     """


+Dictionaries/Lists
+------------------
+  If a dictionary (dict) or list object is longer than 80 characters, its
+  items should be split with newlines. Embedded iterables should have their
+  items indented. Additionally, the last item in the dictionary should have
+  a trailing comma. This increases readability and simplifies future diffs.
+
+  Example:
+
+  my_dictionary = {
+      "image": {
+          "name": "Just a Snapshot",
+          "size": 2749573,
+          "properties": {
+              "user_id": 12,
+              "arch": "x86_64",
+          },
+          "things": [
+              "thing_one",
+              "thing_two",
+          ],
+          "status": "ACTIVE",
+      },
+  }
+
+
+Calling Methods
+---------------
+  Calls to methods 80 characters or longer should format each argument with
+  newlines. This is not a requirement, but a guideline.
+
+  unnecessarily_long_function_name('string one',
+                                   'string two',
+                                   kwarg1=constants.ACTIVE,
+                                   kwarg2=['a', 'b', 'c'])
+
+
+  Rather than constructing parameters inline, it is better to break things up:
+
+  list_of_strings = [
+      'what_a_long_string',
+      'not as long',
+  ]
+
+  dict_of_numbers = {
+      'one': 1,
+      'two': 2,
+      'twenty four': 24,
+  }
+
+  object_one.call_a_method('string three',
+                           'string four',
+                           kwarg1=list_of_strings,
+                           kwarg2=dict_of_numbers)
+
+
+Internationalization (i18n) Strings
+-----------------------------------
+  In order to support multiple languages, we have a mechanism to support
+  automatic translations of exception and log strings.
+
+  Example:
+      msg = _("An error occurred")
+      raise HTTPBadRequest(explanation=msg)
+
+  If you have a variable to place within the string, first internationalize
+  the template string then do the replacement.
+
+  Example:
+      msg = _("Missing parameter: %s") % ("flavor",)
+      LOG.error(msg)
+
+  If you have multiple variables to place in the string, use keyword
+  parameters. This helps our translators reorder parameters when needed.
+
+  Example:
+      msg = _("The server with id %(s_id)s has no key %(m_key)s")
+      LOG.error(msg % {"s_id": "1234", "m_key": "imageId"})
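
The _() used in the i18n examples above (and throughout nova) is installed into the builtins by gettext; the removed script further down does exactly that with gettext.install('nova', unicode=1). A minimal, self-contained sketch of the same mechanism (no translation catalogs, so messages pass through unchanged)::

    import gettext

    # Installs _() as a builtin; without catalogs it falls back to
    # NullTranslations and returns the message unchanged.
    gettext.install('nova', unicode=1)

    msg = _("The server with id %(s_id)s has no key %(m_key)s")
    print msg % {"s_id": "1234", "m_key": "imageId"}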

@@ -10,6 +10,7 @@ graft bzrplugins
 graft contrib
 graft po
 graft plugins
+graft nova/api/openstack/schemas
 include nova/api/openstack/notes.txt
 include nova/auth/*.schema
 include nova/auth/novarc.template

@@ -114,11 +114,11 @@ class AjaxConsoleProxy(object):
                 AjaxConsoleProxy.tokens[kwargs['token']] = \
                     {'args': kwargs, 'last_activity': time.time()}

-        conn = rpc.Connection.instance(new=True)
-        consumer = rpc.TopicAdapterConsumer(
-                        connection=conn,
-                        proxy=TopicProxy,
-                        topic=FLAGS.ajax_console_proxy_topic)
+        conn = rpc.create_connection(new=True)
+        consumer = rpc.create_consumer(
+                        conn,
+                        FLAGS.ajax_console_proxy_topic,
+                        TopicProxy)

     def delete_expired_tokens():
         now = time.time()

@@ -1,110 +0,0 @@
-#!/usr/bin/env python
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright 2010 United States Government as represented by the
-# Administrator of the National Aeronautics and Space Administration.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-"""
-Download images from Canonical Image Store
-"""
-
-import gettext
-import json
-import os
-import tempfile
-import shutil
-import subprocess
-import sys
-import urllib2
-
-# If ../nova/__init__.py exists, add ../ to Python search path, so that
-# it will override what happens to be installed in /usr/(local/)lib/python...
-possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]),
-                                   os.pardir,
-                                   os.pardir))
-if os.path.exists(os.path.join(possible_topdir, 'nova', '__init__.py')):
-    sys.path.insert(0, possible_topdir)
-
-gettext.install('nova', unicode=1)
-
-from nova import flags
-from nova import log as logging
-from nova import utils
-from nova.objectstore import image
-
-FLAGS = flags.FLAGS
-
-API_URL = 'https://imagestore.canonical.com/api/dashboard'
-
-
-def get_images():
-    """Get a list of the images from the imagestore URL."""
-    images = json.load(urllib2.urlopen(API_URL))['images']
-    images = [img for img in images if img['title'].find('amd64') > -1]
-    return images
-
-
-def download(img):
-    """Download an image to the local filesystem."""
-    # FIXME(ja): add checksum/signature checks
-    tempdir = tempfile.mkdtemp(prefix='cis-')
-
-    kernel_id = None
-    ramdisk_id = None
-
-    for f in img['files']:
-        if f['kind'] == 'kernel':
-            dest = os.path.join(tempdir, 'kernel')
-            subprocess.call(['curl', '--fail', f['url'], '-o', dest])
-            kernel_id = image.Image.add(dest,
-                description='kernel/' + img['title'], kernel=True)
-
-    for f in img['files']:
-        if f['kind'] == 'ramdisk':
-            dest = os.path.join(tempdir, 'ramdisk')
-            subprocess.call(['curl', '--fail', f['url'], '-o', dest])
-            ramdisk_id = image.Image.add(dest,
-                description='ramdisk/' + img['title'], ramdisk=True)
-
-    for f in img['files']:
-        if f['kind'] == 'image':
-            dest = os.path.join(tempdir, 'image')
-            subprocess.call(['curl', '--fail', f['url'], '-o', dest])
-            ramdisk_id = image.Image.add(dest,
-                description=img['title'], kernel=kernel_id, ramdisk=ramdisk_id)
-
-    shutil.rmtree(tempdir)
-
-
-def main():
-    """Main entry point."""
-    utils.default_flagfile()
-    argv = FLAGS(sys.argv)
-    logging.setup()
-    images = get_images()
-
-    if len(argv) == 2:
-        for img in images:
-            if argv[1] == 'all' or argv[1] == img['title']:
-                download(img)
-    else:
-        print 'usage: %s (title|all)'
-        print 'available images:'
-        for img in images:
-            print img['title']
-
-if __name__ == '__main__':
-    main()

@@ -56,6 +56,7 @@
 import gettext
 import glob
 import json
+import math
 import netaddr
 import os
 import sys
@@ -591,6 +592,31 @@ class FixedIpCommands(object):
                                               fixed_ip['address'],
                                               mac_address, hostname, host)

+    @args('--address', dest="address", metavar='<ip address>',
+          help='IP address')
+    def reserve(self, address):
+        """Mark fixed ip as reserved
+        arguments: address"""
+        self._set_reserved(address, True)
+
+    @args('--address', dest="address", metavar='<ip address>',
+          help='IP address')
+    def unreserve(self, address):
+        """Mark fixed ip as free to use
+        arguments: address"""
+        self._set_reserved(address, False)
+
+    def _set_reserved(self, address, reserved):
+        ctxt = context.get_admin_context()
+
+        try:
+            fixed_ip = db.fixed_ip_get_by_address(ctxt, address)
+            db.fixed_ip_update(ctxt, fixed_ip['address'],
+                               {'reserved': reserved})
+        except exception.NotFound as ex:
+            print "error: %s" % ex
+            sys.exit(2)
+
+
 class FloatingIpCommands(object):
     """Class for managing floating ip."""
@@ -694,7 +720,16 @@ class NetworkCommands(object):
         if not num_networks:
             num_networks = FLAGS.num_networks
         if not network_size:
-            network_size = FLAGS.network_size
+            fixnet = netaddr.IPNetwork(fixed_range_v4)
+            each_subnet_size = fixnet.size / int(num_networks)
+            if each_subnet_size > FLAGS.network_size:
+                network_size = FLAGS.network_size
+                subnet = 32 - int(math.log(network_size, 2))
+                oversize_msg = _('Subnet(s) too large, defaulting to /%s.'
+                    ' To override, specify network_size flag.') % subnet
+                print oversize_msg
+            else:
+                network_size = fixnet.size
         if not multi_host:
             multi_host = FLAGS.multi_host
         else:
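
The new sizing branch above splits the fixed range evenly across num_networks and, when each share would exceed the network_size flag, falls back to the flag and reports the prefix as 32 - log2(network_size). A rough worked example (the flag value is inlined as a plain variable)::

    import math
    import netaddr

    fixnet = netaddr.IPNetwork('10.0.0.0/16')
    num_networks = 16
    network_size = 256                      # stand-in for FLAGS.network_size

    each_subnet_size = fixnet.size / int(num_networks)   # 65536 / 16 = 4096
    if each_subnet_size > network_size:
        subnet = 32 - int(math.log(network_size, 2))      # 32 - 8 = 24
        print 'Subnet(s) too large, defaulting to /%s.' % subnet
    else:
        network_size = fixnet.size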

@@ -1084,10 +1119,12 @@ class InstanceTypeCommands(object):

     @args('--name', dest='name', metavar='<name>',
             help='Name of instance type/flavor')
-    def delete(self, name, purge=None):
+    @args('--purge', action="store_true", dest='purge', default=False,
+            help='purge record from database')
+    def delete(self, name, purge):
         """Marks instance types / flavors as deleted"""
         try:
-            if purge == "--purge":
+            if purge:
                 instance_types.purge(name)
                 verb = "purged"
             else:
@@ -1224,11 +1261,12 @@ class ImageCommands(object):
                           is_public, architecture)

     def _lookup(self, old_image_id):
+        elevated = context.get_admin_context()
         try:
             internal_id = ec2utils.ec2_id_to_id(old_image_id)
-            image = self.image_service.show(context, internal_id)
+            image = self.image_service.show(elevated, internal_id)
         except (exception.InvalidEc2Id, exception.ImageNotFound):
-            image = self.image_service.show_by_name(context, old_image_id)
+            image = self.image_service.show_by_name(elevated, old_image_id)
         return image['id']

     def _old_to_new(self, old):

@@ -16,3 +16,4 @@ export NOVA_API_KEY="%(access)s"
 export NOVA_USERNAME="%(user)s"
 export NOVA_PROJECT_ID="%(project)s"
 export NOVA_URL="%(os)s"
+export NOVA_VERSION="1.1"

@@ -317,7 +317,7 @@ DEFINE_string('osapi_extensions_path', '/var/lib/nova/extensions',
 DEFINE_string('osapi_host', '$my_ip', 'ip of api server')
 DEFINE_string('osapi_scheme', 'http', 'prefix for openstack')
 DEFINE_integer('osapi_port', 8774, 'OpenStack API port')
-DEFINE_string('osapi_path', '/v1.0/', 'suffix for openstack')
+DEFINE_string('osapi_path', '/v1.1/', 'suffix for openstack')
 DEFINE_integer('osapi_max_limit', 1000,
                'max number of items returned in a collection response')

@@ -387,3 +387,11 @@ DEFINE_list('zone_capabilities',
         'Key/Multi-value list representng capabilities of this zone')
 DEFINE_string('build_plan_encryption_key', None,
         '128bit (hex) encryption key for scheduler build plans.')
+
+DEFINE_bool('start_guests_on_host_boot', False,
+            'Whether to restart guests when the host reboots')
+DEFINE_bool('resume_guests_state_on_host_boot', False,
+            'Whether to start guests, that was running before the host reboot')
+
+DEFINE_string('root_helper', 'sudo',
+              'Command prefix to use for running commands as root')
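
The root_helper flag added above is the configurable prefix behind the run_as_root=True switch named in the commit message. A rough sketch of how such a prefix gets applied, assuming an execute() helper in the spirit of nova.utils.execute (not the actual implementation)::

    import shlex
    import subprocess

    root_helper = 'sudo'                    # default of the new flag


    def execute(*cmd, **kwargs):
        """Run cmd, prepending the configured root helper when asked."""
        if kwargs.pop('run_as_root', False):
            cmd = tuple(shlex.split(root_helper)) + cmd
        return subprocess.check_output(cmd)

    # callers request root explicitly instead of hard-coding 'sudo':
    # execute('ip', 'link', 'show', run_as_root=True)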

@@ -17,7 +17,8 @@
 Handles all requests relating to schedulers.
 """

-import novaclient
+from novaclient import v1_1 as novaclient
+from novaclient import exceptions as novaclient_exceptions

 from nova import db
 from nova import exception
@@ -112,7 +113,7 @@ def _wrap_method(function, self):
 def _process(func, zone):
     """Worker stub for green thread pool. Give the worker
     an authenticated nova client and zone info."""
-    nova = novaclient.OpenStack(zone.username, zone.password, None,
+    nova = novaclient.Client(zone.username, zone.password, None,
             zone.api_url)
     nova.authenticate()
     return func(nova, zone)
@@ -132,10 +133,10 @@ def call_zone_method(context, method_name, errors_to_ignore=None,
     zones = db.zone_get_all(context)
     for zone in zones:
         try:
-            nova = novaclient.OpenStack(zone.username, zone.password, None,
+            nova = novaclient.Client(zone.username, zone.password, None,
                     zone.api_url)
             nova.authenticate()
-        except novaclient.exceptions.BadRequest, e:
+        except novaclient_exceptions.BadRequest, e:
             url = zone.api_url
             LOG.warn(_("Failed request to zone; URL=%(url)s: %(e)s")
                     % locals())
@@ -188,7 +189,7 @@ def _issue_novaclient_command(nova, zone, collection,
     if method_name in ['find', 'findall']:
         try:
             return getattr(manager, method_name)(**kwargs)
-        except novaclient.NotFound:
+        except novaclient_exceptions.NotFound:
             url = zone.api_url
             LOG.debug(_("%(collection)s.%(method_name)s didn't find "
                     "anything matching '%(kwargs)s' on '%(url)s'" %
@@ -200,7 +201,7 @@ def _issue_novaclient_command(nova, zone, collection,
         item = args.pop(0)
         try:
             result = manager.get(item)
-        except novaclient.NotFound:
+        except novaclient_exceptions.NotFound:
             url = zone.api_url
             LOG.debug(_("%(collection)s '%(item)s' not found on '%(url)s'" %
                     locals()))
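
Every scheduler module in this commit moves from the old top-level novaclient.OpenStack entry point to the versioned client plus a separate exceptions module. The import and call shape, with placeholder credentials and URL::

    from novaclient import exceptions as novaclient_exceptions
    from novaclient import v1_1 as novaclient

    try:
        nova = novaclient.Client('username', 'password', None,
                                 'http://child-zone:8774/v1.1/')
        nova.authenticate()
    except novaclient_exceptions.BadRequest, e:
        print "Failed request to zone: %s" % e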

@@ -96,7 +96,8 @@ class LeastCostScheduler(zone_aware_scheduler.ZoneAwareScheduler):
                         cost_fn_str=cost_fn_str)

             try:
-                weight = getattr(FLAGS, "%s_weight" % cost_fn.__name__)
+                flag_name = "%s_weight" % cost_fn.__name__
+                weight = getattr(FLAGS, flag_name)
             except AttributeError:
                 raise exception.SchedulerWeightFlagNotFound(
                         flag_name=flag_name)

@@ -24,7 +24,9 @@ import operator
 import json

 import M2Crypto
-import novaclient
+
+from novaclient import v1_1 as novaclient
+from novaclient import exceptions as novaclient_exceptions

 from nova import crypto
 from nova import db
@@ -58,12 +60,13 @@ class ZoneAwareScheduler(driver.Scheduler):
         """Create the requested resource in this Zone."""
         host = build_plan_item['hostname']
         base_options = request_spec['instance_properties']
+        image = request_spec['image']

         # TODO(sandy): I guess someone needs to add block_device_mapping
         # support at some point? Also, OS API has no concept of security
         # groups.
         instance = compute_api.API().create_db_entry_for_new_instance(context,
-                base_options, None, [])
+                image, base_options, None, [])

         instance_id = instance['id']
         kwargs['instance_id'] = instance_id
@@ -117,10 +120,9 @@ class ZoneAwareScheduler(driver.Scheduler):
                 % locals())
         nova = None
         try:
-            nova = novaclient.OpenStack(zone.username, zone.password, None,
-                    url)
+            nova = novaclient.Client(zone.username, zone.password, None, url)
             nova.authenticate()
-        except novaclient.exceptions.BadRequest, e:
+        except novaclient_exceptions.BadRequest, e:
             raise exception.NotAuthorized(_("Bad credentials attempting "
                     "to talk to zone at %(url)s.") % locals())

@@ -264,8 +266,8 @@ class ZoneAwareScheduler(driver.Scheduler):
         """

         if topic != "compute":
-            raise NotImplemented(_("Zone Aware Scheduler only understands "
-                    "Compute nodes (for now)"))
+            raise NotImplementedError(_("Zone Aware Scheduler only understands"
+                    " Compute nodes (for now)"))

         num_instances = request_spec.get('num_instances', 1)
         instance_type = request_spec['instance_type']
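
The last hunk also fixes an easy-to-miss bug: NotImplemented is the rich-comparison sentinel, not an exception class, so raise NotImplemented(...) blows up with a TypeError instead of delivering the intended message. Illustration::

    def schedule(topic):
        if topic != "compute":
            raise NotImplementedError("Zone Aware Scheduler only understands "
                                      "Compute nodes (for now)")
        return "scheduled"

    try:
        schedule("volume")
    except NotImplementedError, e:
        print "rejected: %s" % e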

@@ -18,10 +18,11 @@ ZoneManager oversees all communications with child Zones.
 """

 import datetime
-import novaclient
 import thread
 import traceback

+from novaclient import v1_1 as novaclient
+
 from eventlet import greenpool

 from nova import db
@@ -89,8 +90,8 @@ class ZoneState(object):

 def _call_novaclient(zone):
     """Call novaclient. Broken out for testing purposes."""
-    client = novaclient.OpenStack(zone.username, zone.password, None,
-            zone.api_url)
+    client = novaclient.Client(zone.username, zone.password, None,
+            zone.api_url)
     return client.zones.info()._info


@@ -197,7 +198,7 @@ class ZoneManager(object):
     def update_service_capabilities(self, service_name, host, capabilities):
         """Update the per-service capabilities based on this notification."""
         logging.debug(_("Received %(service_name)s service update from "
-                "%(host)s: %(capabilities)s") % locals())
+                "%(host)s.") % locals())
         service_caps = self.service_states.get(host, {})
         capabilities["timestamp"] = utils.utcnow()  # Reported time
         service_caps[service_name] = capabilities

@@ -21,13 +21,9 @@ import random

 from nova import context
 from nova import db
-from nova import flags
 from nova import test
 from nova.virt import hyperv

-FLAGS = flags.FLAGS
-FLAGS.connection_type = 'hyperv'
-

 class HyperVTestCase(test.TestCase):
     """Test cases for the Hyper-V driver"""
@@ -36,6 +32,7 @@ class HyperVTestCase(test.TestCase):
         self.user_id = 'fake'
         self.project_id = 'fake'
         self.context = context.RequestContext(self.user_id, self.project_id)
+        self.flags(connection_type='hyperv')

     def test_create_destroy(self):
         """Create a VM and destroy it"""
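
This file, like several test modules below, stops mutating FLAGS at import time and uses the self.flags() helper, which overrides a flag for one test and restores it afterwards. A minimal sketch of that override-and-restore pattern (a stand-in FLAGS object, not nova's actual test base class)::

    import unittest


    class _Flags(object):
        connection_type = 'libvirt'         # stand-in flag holder


    FLAGS = _Flags()


    class FlagsTestCase(unittest.TestCase):
        def flags(self, **overrides):
            for name, value in overrides.items():
                # restore the original value when the test finishes
                self.addCleanup(setattr, FLAGS, name, getattr(FLAGS, name))
                setattr(FLAGS, name, value)

        def test_uses_hyperv(self):
            self.flags(connection_type='hyperv')
            self.assertEqual(FLAGS.connection_type, 'hyperv')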

@@ -19,12 +19,9 @@ Tests For Scheduler Host Filters.
 import json

 from nova import exception
-from nova import flags
 from nova import test
 from nova.scheduler import host_filter

-FLAGS = flags.FLAGS
-

 class FakeZoneManager:
     pass
@@ -57,9 +54,9 @@ class HostFilterTestCase(test.TestCase):
                 'host_name-label': 'xs-%s' % multiplier}

     def setUp(self):
-        self.old_flag = FLAGS.default_host_filter
-        FLAGS.default_host_filter = \
-            'nova.scheduler.host_filter.AllHostsFilter'
+        super(HostFilterTestCase, self).setUp()
+        default_host_filter = 'nova.scheduler.host_filter.AllHostsFilter'
+        self.flags(default_host_filter=default_host_filter)
         self.instance_type = dict(name='tiny',
                 memory_mb=50,
                 vcpus=10,
@@ -98,9 +95,6 @@ class HostFilterTestCase(test.TestCase):
         host09['xpu_arch'] = 'fermi'
         host09['xpu_info'] = 'Tesla 2150'

-    def tearDown(self):
-        FLAGS.default_host_filter = self.old_flag
-
     def test_choose_filter(self):
         # Test default filter ...
         hf = host_filter.choose_host_filter()

@@ -16,13 +16,11 @@
 Tests For Least Cost Scheduler
 """

-from nova import flags
 from nova import test
 from nova.scheduler import least_cost
 from nova.tests.scheduler import test_zone_aware_scheduler

 MB = 1024 * 1024
-FLAGS = flags.FLAGS


 class FakeHost(object):
@@ -95,10 +93,9 @@ class LeastCostSchedulerTestCase(test.TestCase):
         self.assertWeights(expected, num, request_spec, hosts)

     def test_noop_cost_fn(self):
-        FLAGS.least_cost_scheduler_cost_functions = [
-            'nova.scheduler.least_cost.noop_cost_fn',
-        ]
-        FLAGS.noop_cost_fn_weight = 1
+        self.flags(least_cost_scheduler_cost_functions=[
+                'nova.scheduler.least_cost.noop_cost_fn'],
+                noop_cost_fn_weight=1)

         num = 1
         request_spec = {}
@@ -109,10 +106,9 @@ class LeastCostSchedulerTestCase(test.TestCase):
         self.assertWeights(expected, num, request_spec, hosts)

     def test_cost_fn_weights(self):
-        FLAGS.least_cost_scheduler_cost_functions = [
-            'nova.scheduler.least_cost.noop_cost_fn',
-        ]
-        FLAGS.noop_cost_fn_weight = 2
+        self.flags(least_cost_scheduler_cost_functions=[
+                'nova.scheduler.least_cost.noop_cost_fn'],
+                noop_cost_fn_weight=2)

         num = 1
         request_spec = {}
@@ -123,10 +119,9 @@ class LeastCostSchedulerTestCase(test.TestCase):
         self.assertWeights(expected, num, request_spec, hosts)

     def test_compute_fill_first_cost_fn(self):
-        FLAGS.least_cost_scheduler_cost_functions = [
-            'nova.scheduler.least_cost.compute_fill_first_cost_fn',
-        ]
-        FLAGS.compute_fill_first_cost_fn_weight = 1
+        self.flags(least_cost_scheduler_cost_functions=[
+                'nova.scheduler.least_cost.compute_fill_first_cost_fn'],
+                compute_fill_first_cost_fn_weight=1)

         num = 1
         instance_type = {'memory_mb': 1024}

@@ -21,7 +21,9 @@ import json
 import nova.db

 from nova import exception
+from nova import rpc
 from nova import test
+from nova.compute import api as compute_api
 from nova.scheduler import driver
 from nova.scheduler import zone_aware_scheduler
 from nova.scheduler import zone_manager
@@ -114,7 +116,7 @@ def fake_provision_resource_from_blob(context, item, instance_id,


 def fake_decrypt_blob_returns_local_info(blob):
-    return {'foo': True}  # values aren't important.
+    return {'hostname': 'foooooo'}  # values aren't important.


 def fake_decrypt_blob_returns_child_info(blob):
@@ -283,14 +285,29 @@ class ZoneAwareSchedulerTestCase(test.TestCase):
         global was_called
         sched = FakeZoneAwareScheduler()
         was_called = False

+        def fake_create_db_entry_for_new_instance(self, context,
+                image, base_options, security_group,
+                block_device_mapping, num=1):
+            global was_called
+            was_called = True
+            # return fake instances
+            return {'id': 1, 'uuid': 'f874093c-7b17-49c0-89c3-22a5348497f9'}
+
+        def fake_rpc_cast(*args, **kwargs):
+            pass
+
         self.stubs.Set(sched, '_decrypt_blob',
                 fake_decrypt_blob_returns_local_info)
-        self.stubs.Set(sched, '_provision_resource_locally',
-                fake_provision_resource_locally)
+        self.stubs.Set(compute_api.API,
+                'create_db_entry_for_new_instance',
+                fake_create_db_entry_for_new_instance)
+        self.stubs.Set(rpc, 'cast', fake_rpc_cast)

-        request_spec = {'blob': "Non-None blob data"}
+        build_plan_item = {'blob': "Non-None blob data"}
+        request_spec = {'image': {}, 'instance_properties': {}}
+
-        sched._provision_resource_from_blob(None, request_spec, 1,
+        sched._provision_resource_from_blob(None, build_plan_item, 1,
                 request_spec, {})
         self.assertTrue(was_called)
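
The rewritten test stubs out compute_api.API.create_db_entry_for_new_instance and rpc.cast rather than the scheduler's own helper. The idea behind stubs.Set is plain attribute monkeypatching with automatic restore; a generic sketch using only unittest (not the mox/stubout implementation)::

    import unittest


    class Greeter(object):
        def greet(self):
            return "hello"


    class StubbingTestCase(unittest.TestCase):
        def stub_set(self, obj, name, replacement):
            # put the original attribute back after the test
            self.addCleanup(setattr, obj, name, getattr(obj, name))
            setattr(obj, name, replacement)

        def test_greet_is_stubbed(self):
            self.stub_set(Greeter, 'greet', lambda self: "stubbed")
            self.assertEqual(Greeter().greet(), "stubbed")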

@@ -27,6 +27,7 @@ import random
 import StringIO
 import webob

+from nova import block_device
 from nova import context
 from nova import exception
 from nova import test
@@ -147,10 +148,12 @@ class Ec2utilsTestCase(test.TestCase):
         properties0 = {'mappings': mappings}
         properties1 = {'root_device_name': '/dev/sdb', 'mappings': mappings}

-        root_device_name = ec2utils.properties_root_device_name(properties0)
+        root_device_name = block_device.properties_root_device_name(
+            properties0)
         self.assertEqual(root_device_name, '/dev/sda1')

-        root_device_name = ec2utils.properties_root_device_name(properties1)
+        root_device_name = block_device.properties_root_device_name(
+            properties1)
         self.assertEqual(root_device_name, '/dev/sdb')

     def test_mapping_prepend_dev(self):
@@ -184,7 +187,7 @@ class Ec2utilsTestCase(test.TestCase):
                             'device': '/dev/sdc1'},
                            {'virtual': 'ephemeral1',
                             'device': '/dev/sdc1'}]
-        self.assertDictListMatch(ec2utils.mappings_prepend_dev(mappings),
+        self.assertDictListMatch(block_device.mappings_prepend_dev(mappings),
                                  expected_result)


@@ -336,6 +339,33 @@ class ApiEc2TestCase(test.TestCase):

         self.ec2.delete_security_group(security_group_name)

+    def test_group_name_valid_chars_security_group(self):
+        """ Test that we sanely handle invalid security group names.
+        API Spec states we should only accept alphanumeric characters,
+        spaces, dashes, and underscores. """
+        self.expect_http()
+        self.mox.ReplayAll()
+
+        # Test block group_name of non alphanumeric characters, spaces,
+        # dashes, and underscores.
+        security_group_name = "aa #^% -=99"
+
+        self.assertRaises(EC2ResponseError, self.ec2.create_security_group,
+                          security_group_name, 'test group')
+
+    def test_group_name_valid_length_security_group(self):
+        """Test that we sanely handle invalid security group names.
+        API Spec states that the length should not exceed 255 chars """
+        self.expect_http()
+        self.mox.ReplayAll()
+
+        # Test block group_name > 255 chars
+        security_group_name = "".join(random.choice("poiuytrewqasdfghjklmnbvc")
+                                      for x in range(random.randint(256, 266)))
+
+        self.assertRaises(EC2ResponseError, self.ec2.create_security_group,
+                          security_group_name, 'test group')
+
     def test_authorize_revoke_security_group_cidr(self):
         """
         Test that we can add and remove CIDR based rules
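
The two new tests only check that the API rejects bad names; a quick sketch of the rule they describe (character class and length limit taken from the docstrings above, the helper name is made up)::

    import re

    _NAME_RE = re.compile(r'^[a-zA-Z0-9 _\-]+$')


    def is_valid_security_group_name(name):
        """Alphanumerics, spaces, dashes, underscores; at most 255 chars."""
        return bool(name) and len(name) <= 255 and bool(_NAME_RE.match(name))

    assert is_valid_security_group_name("web servers_01")
    assert not is_valid_security_group_name("aa #^% -=99")
    assert not is_valid_security_group_name("x" * 256)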

@@ -62,7 +62,12 @@ class project_generator(object):


 class user_and_project_generator(object):
-    def __init__(self, manager, user_state={}, project_state={}):
+    def __init__(self, manager, user_state=None, project_state=None):
+        if not user_state:
+            user_state = {}
+        if not project_state:
+            project_state = {}
+
         self.manager = manager
         if 'name' not in user_state:
             user_state['name'] = 'test1'
@@ -83,9 +88,9 @@ class user_and_project_generator(object):

 class _AuthManagerBaseTestCase(test.TestCase):
     def setUp(self):
-        FLAGS.auth_driver = self.auth_driver
         super(_AuthManagerBaseTestCase, self).setUp()
-        self.flags(connection_type='fake')
+        self.flags(auth_driver=self.auth_driver,
+                connection_type='fake')
         self.manager = manager.AuthManager(new=True)
         self.manager.mc.cache = {}
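
The signature change above (and the matching ones in the compute tests later) avoids Python's mutable-default-argument trap: a default like user_state={} is created once and shared by every call that relies on it. A minimal illustration::

    def bad_append(item, bucket=[]):       # one list shared by all calls
        bucket.append(item)
        return bucket


    def good_append(item, bucket=None):    # fresh list per call
        if bucket is None:
            bucket = []
        bucket.append(item)
        return bucket

    assert bad_append(1) == [1]
    assert bad_append(2) == [1, 2]         # previous call leaked in
    assert good_append(1) == [1]
    assert good_append(2) == [2]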

@@ -17,6 +17,8 @@
 # under the License.
 import mox

+import functools
+
 from base64 import b64decode
 from M2Crypto import BIO
 from M2Crypto import RSA
@@ -99,11 +101,9 @@ class CloudTestCase(test.TestCase):
         """Makes sure describe regions runs without raising an exception"""
         result = self.cloud.describe_regions(self.context)
         self.assertEqual(len(result['regionInfo']), 1)
-        regions = FLAGS.region_list
-        FLAGS.region_list = ["one=test_host1", "two=test_host2"]
+        self.flags(region_list=["one=test_host1", "two=test_host2"])
         result = self.cloud.describe_regions(self.context)
         self.assertEqual(len(result['regionInfo']), 2)
-        FLAGS.region_list = regions

     def test_describe_addresses(self):
         """Makes sure describe addresses runs without raising an exception"""
@@ -894,13 +894,16 @@ class CloudTestCase(test.TestCase):
     def test_modify_image_attribute(self):
         modify_image_attribute = self.cloud.modify_image_attribute

+        fake_metadata = {'id': 1, 'container_format': 'ami',
+                         'properties': {'kernel_id': 1, 'ramdisk_id': 1,
+                                        'type': 'machine'}, 'is_public': False}
+
         def fake_show(meh, context, id):
-            return {'id': 1, 'container_format': 'ami',
-                    'properties': {'kernel_id': 1, 'ramdisk_id': 1,
-                                   'type': 'machine'}, 'is_public': False}
+            return fake_metadata

         def fake_update(meh, context, image_id, metadata, data=None):
-            return metadata
+            fake_metadata.update(metadata)
+            return fake_metadata

         self.stubs.Set(fake._FakeImageService, 'show', fake_show)
         self.stubs.Set(fake._FakeImageService, 'show_by_name', fake_show)
@@ -1466,3 +1469,147 @@ class CloudTestCase(test.TestCase):
         # TODO(yamahata): clean up snapshot created by CreateImage.

         self._restart_compute_service()
+
+    @staticmethod
+    def _fake_bdm_get(ctxt, id):
+        return [{'volume_id': 87654321,
+                 'snapshot_id': None,
+                 'no_device': None,
+                 'virtual_name': None,
+                 'delete_on_termination': True,
+                 'device_name': '/dev/sdh'},
+                {'volume_id': None,
+                 'snapshot_id': 98765432,
+                 'no_device': None,
+                 'virtual_name': None,
+                 'delete_on_termination': True,
+                 'device_name': '/dev/sdi'},
+                {'volume_id': None,
+                 'snapshot_id': None,
+                 'no_device': True,
+                 'virtual_name': None,
+                 'delete_on_termination': None,
+                 'device_name': None},
+                {'volume_id': None,
+                 'snapshot_id': None,
+                 'no_device': None,
+                 'virtual_name': 'ephemeral0',
+                 'delete_on_termination': None,
+                 'device_name': '/dev/sdb'},
+                {'volume_id': None,
+                 'snapshot_id': None,
+                 'no_device': None,
+                 'virtual_name': 'swap',
+                 'delete_on_termination': None,
+                 'device_name': '/dev/sdc'},
+                {'volume_id': None,
+                 'snapshot_id': None,
+                 'no_device': None,
+                 'virtual_name': 'ephemeral1',
+                 'delete_on_termination': None,
+                 'device_name': '/dev/sdd'},
+                {'volume_id': None,
+                 'snapshot_id': None,
+                 'no_device': None,
+                 'virtual_name': 'ephemeral2',
+                 'delete_on_termination': None,
+                 'device_name': '/dev/sd3'},
+                ]
+
+    def test_get_instance_mapping(self):
+        """Make sure that _get_instance_mapping works"""
+        ctxt = None
+        instance_ref0 = {'id': 0,
+                         'root_device_name': None}
+        instance_ref1 = {'id': 0,
+                         'root_device_name': '/dev/sda1'}
+
+        self.stubs.Set(db, 'block_device_mapping_get_all_by_instance',
+                       self._fake_bdm_get)
+
+        expected = {'ami': 'sda1',
+                    'root': '/dev/sda1',
+                    'ephemeral0': '/dev/sdb',
+                    'swap': '/dev/sdc',
+                    'ephemeral1': '/dev/sdd',
+                    'ephemeral2': '/dev/sd3'}
+
+        self.assertEqual(self.cloud._format_instance_mapping(ctxt,
+                                                             instance_ref0),
+                         cloud._DEFAULT_MAPPINGS)
+        self.assertEqual(self.cloud._format_instance_mapping(ctxt,
+                                                             instance_ref1),
+                         expected)
+
+    def test_describe_instance_attribute(self):
+        """Make sure that describe_instance_attribute works"""
+        self.stubs.Set(db, 'block_device_mapping_get_all_by_instance',
+                       self._fake_bdm_get)
+
+        def fake_get(ctxt, instance_id):
+            return {
+                'id': 0,
+                'root_device_name': '/dev/sdh',
+                'security_groups': [{'name': 'fake0'}, {'name': 'fake1'}],
+                'state_description': 'stopping',
+                'instance_type': {'name': 'fake_type'},
+                'kernel_id': 1,
+                'ramdisk_id': 2,
+                'user_data': 'fake-user data',
+                }
+        self.stubs.Set(self.cloud.compute_api, 'get', fake_get)
+
+        def fake_volume_get(ctxt, volume_id, session=None):
+            if volume_id == 87654321:
+                return {'id': volume_id,
+                        'attach_time': '13:56:24',
+                        'status': 'in-use'}
+            raise exception.VolumeNotFound(volume_id=volume_id)
+        self.stubs.Set(db.api, 'volume_get', fake_volume_get)
+
+        get_attribute = functools.partial(
+            self.cloud.describe_instance_attribute,
+            self.context, 'i-12345678')
+
+        bdm = get_attribute('blockDeviceMapping')
+        bdm['blockDeviceMapping'].sort()
+
+        expected_bdm = {'instance_id': 'i-12345678',
+                        'rootDeviceType': 'ebs',
+                        'blockDeviceMapping': [
+                            {'deviceName': '/dev/sdh',
+                             'ebs': {'status': 'in-use',
+                                     'deleteOnTermination': True,
+                                     'volumeId': 87654321,
+                                     'attachTime': '13:56:24'}}]}
+        expected_bdm['blockDeviceMapping'].sort()
+        self.assertEqual(bdm, expected_bdm)
+        # NOTE(yamahata): this isn't supported
+        # get_attribute('disableApiTermination')
+        groupSet = get_attribute('groupSet')
+        groupSet['groupSet'].sort()
+        expected_groupSet = {'instance_id': 'i-12345678',
+                             'groupSet': [{'groupId': 'fake0'},
+                                          {'groupId': 'fake1'}]}
+        expected_groupSet['groupSet'].sort()
+        self.assertEqual(groupSet, expected_groupSet)
+        self.assertEqual(get_attribute('instanceInitiatedShutdownBehavior'),
+                         {'instance_id': 'i-12345678',
+                          'instanceInitiatedShutdownBehavior': 'stop'})
+        self.assertEqual(get_attribute('instanceType'),
+                         {'instance_id': 'i-12345678',
+                          'instanceType': 'fake_type'})
+        self.assertEqual(get_attribute('kernel'),
+                         {'instance_id': 'i-12345678',
+                          'kernel': 'aki-00000001'})
+        self.assertEqual(get_attribute('ramdisk'),
+                         {'instance_id': 'i-12345678',
+                          'ramdisk': 'ari-00000002'})
+        self.assertEqual(get_attribute('rootDeviceName'),
+                         {'instance_id': 'i-12345678',
+                          'rootDeviceName': '/dev/sdh'})
+        # NOTE(yamahata): this isn't supported
+        # get_attribute('sourceDestCheck')
+        self.assertEqual(get_attribute('userData'),
+                         {'instance_id': 'i-12345678',
+                          'userData': '}\xa9\x1e\xba\xc7\xabu\xabZ'})
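
The new describe_instance_attribute test uses functools.partial so the context and instance id are written once; the standard-library behaviour it relies on::

    import functools


    def describe_instance_attribute(context, instance_id, attribute):
        return {'instance_id': instance_id, 'attribute': attribute}

    # freeze the first two arguments; only the attribute varies per call
    get_attribute = functools.partial(describe_instance_attribute,
                                      'fake-context', 'i-12345678')

    print get_attribute('kernel')
    print get_attribute('ramdisk')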

@@ -26,6 +26,7 @@ from nova.compute import power_state
 from nova import context
 from nova import db
 from nova.db.sqlalchemy import models
+from nova.db.sqlalchemy import api as sqlalchemy_api
 from nova import exception
 from nova import flags
 import nova.image.fake
@@ -73,8 +74,11 @@ class ComputeTestCase(test.TestCase):

         self.stubs.Set(nova.image.fake._FakeImageService, 'show', fake_show)

-    def _create_instance(self, params={}):
+    def _create_instance(self, params=None):
         """Create a test instance"""
+        if not params:
+            params = {}
+
         inst = {}
         inst['image_ref'] = 1
         inst['reservation_id'] = 'r-fakeres'
@@ -87,8 +91,11 @@ class ComputeTestCase(test.TestCase):
         inst.update(params)
         return db.instance_create(self.context, inst)['id']

-    def _create_instance_type(self, params={}):
+    def _create_instance_type(self, params=None):
         """Create a test instance"""
+        if not params:
+            params = {}
+
         context = self.context.elevated()
         inst = {}
         inst['name'] = 'm1.small'
@@ -535,7 +542,9 @@ class ComputeTestCase(test.TestCase):

         db.instance_update(self.context, instance_id, {'host': 'foo'})

-        self.compute.prep_resize(context, inst_ref['uuid'], 3)
+        new_instance_type_ref = db.instance_type_get_by_flavor_id(context, 3)
+        self.compute.prep_resize(context, inst_ref['uuid'],
+                new_instance_type_ref['id'])

         migration_ref = db.migration_get_by_instance_and_status(context,
                 inst_ref['uuid'], 'pre-migrating')
@@ -862,6 +871,458 @@ class ComputeTestCase(test.TestCase):
         self.assertEqual(len(instances), 1)
         self.assertEqual(power_state.SHUTOFF, instances[0]['state'])

+    def test_get_all_by_name_regexp(self):
+        """Test searching instances by name (display_name)"""
+        c = context.get_admin_context()
+        instance_id1 = self._create_instance({'display_name': 'woot'})
+        instance_id2 = self._create_instance({
+                'display_name': 'woo',
+                'id': 20})
+        instance_id3 = self._create_instance({
+                'display_name': 'not-woot',
+                'id': 30})
+
+        instances = self.compute_api.get_all(c,
+                search_opts={'name': 'woo.*'})
+        self.assertEqual(len(instances), 2)
+        instance_ids = [instance.id for instance in instances]
+        self.assertTrue(instance_id1 in instance_ids)
+        self.assertTrue(instance_id2 in instance_ids)
+
+        instances = self.compute_api.get_all(c,
+                search_opts={'name': 'woot.*'})
+        instance_ids = [instance.id for instance in instances]
+        self.assertEqual(len(instances), 1)
+        self.assertTrue(instance_id1 in instance_ids)
+
+        instances = self.compute_api.get_all(c,
+                search_opts={'name': '.*oot.*'})
+        self.assertEqual(len(instances), 2)
+        instance_ids = [instance.id for instance in instances]
+        self.assertTrue(instance_id1 in instance_ids)
+        self.assertTrue(instance_id3 in instance_ids)
+
+        instances = self.compute_api.get_all(c,
+                search_opts={'name': 'n.*'})
+        self.assertEqual(len(instances), 1)
+        instance_ids = [instance.id for instance in instances]
+        self.assertTrue(instance_id3 in instance_ids)
+
+        instances = self.compute_api.get_all(c,
+                search_opts={'name': 'noth.*'})
+        self.assertEqual(len(instances), 0)
+
+        db.instance_destroy(c, instance_id1)
+        db.instance_destroy(c, instance_id2)
+        db.instance_destroy(c, instance_id3)
+
+    def test_get_all_by_instance_name_regexp(self):
+        """Test searching instances by name"""
+        self.flags(instance_name_template='instance-%d')
+
+        c = context.get_admin_context()
+        instance_id1 = self._create_instance()
+        instance_id2 = self._create_instance({'id': 2})
+        instance_id3 = self._create_instance({'id': 10})
+
+        instances = self.compute_api.get_all(c,
+                search_opts={'instance_name': 'instance.*'})
+        self.assertEqual(len(instances), 3)
+
+        instances = self.compute_api.get_all(c,
+                search_opts={'instance_name': '.*\-\d$'})
+        self.assertEqual(len(instances), 2)
+        instance_ids = [instance.id for instance in instances]
+        self.assertTrue(instance_id1 in instance_ids)
+        self.assertTrue(instance_id2 in instance_ids)
+
+        instances = self.compute_api.get_all(c,
+                search_opts={'instance_name': 'i.*2'})
+        self.assertEqual(len(instances), 1)
+        self.assertEqual(instances[0].id, instance_id2)
+
+        db.instance_destroy(c, instance_id1)
+        db.instance_destroy(c, instance_id2)
+        db.instance_destroy(c, instance_id3)
+
+    def test_get_by_fixed_ip(self):
+        """Test getting 1 instance by Fixed IP"""
+        c = context.get_admin_context()
+        instance_id1 = self._create_instance()
+        instance_id2 = self._create_instance({'id': 20})
+        instance_id3 = self._create_instance({'id': 30})
+
+        vif_ref1 = db.virtual_interface_create(c,
+                {'address': '12:34:56:78:90:12',
+                 'instance_id': instance_id1,
+                 'network_id': 1})
+        vif_ref2 = db.virtual_interface_create(c,
+                {'address': '90:12:34:56:78:90',
+                 'instance_id': instance_id2,
+                 'network_id': 1})
+
+        db.fixed_ip_create(c,
+                {'address': '1.1.1.1',
+                 'instance_id': instance_id1,
+                 'virtual_interface_id': vif_ref1['id']})
+        db.fixed_ip_create(c,
+                {'address': '1.1.2.1',
+                 'instance_id': instance_id2,
+                 'virtual_interface_id': vif_ref2['id']})
+
+        # regex not allowed
+        instances = self.compute_api.get_all(c,
+                search_opts={'fixed_ip': '.*'})
+        self.assertEqual(len(instances), 0)
+
+        instances = self.compute_api.get_all(c,
+                search_opts={'fixed_ip': '1.1.3.1'})
+        self.assertEqual(len(instances), 0)
+
+        instances = self.compute_api.get_all(c,
+                search_opts={'fixed_ip': '1.1.1.1'})
+        self.assertEqual(len(instances), 1)
+        self.assertEqual(instances[0].id, instance_id1)
+
+        instances = self.compute_api.get_all(c,
+                search_opts={'fixed_ip': '1.1.2.1'})
+        self.assertEqual(len(instances), 1)
+        self.assertEqual(instances[0].id, instance_id2)
+
+        db.virtual_interface_delete(c, vif_ref1['id'])
+        db.virtual_interface_delete(c, vif_ref2['id'])
+        db.instance_destroy(c, instance_id1)
+        db.instance_destroy(c, instance_id2)
+
+    def test_get_all_by_ip_regexp(self):
+        """Test searching by Floating and Fixed IP"""
+        c = context.get_admin_context()
+        instance_id1 = self._create_instance({'display_name': 'woot'})
+        instance_id2 = self._create_instance({
+                'display_name': 'woo',
+                'id': 20})
+        instance_id3 = self._create_instance({
+                'display_name': 'not-woot',
+                'id': 30})
+
+        vif_ref1 = db.virtual_interface_create(c,
+                {'address': '12:34:56:78:90:12',
+                 'instance_id': instance_id1,
+                 'network_id': 1})
+        vif_ref2 = db.virtual_interface_create(c,
+                {'address': '90:12:34:56:78:90',
+                 'instance_id': instance_id2,
+                 'network_id': 1})
+        vif_ref3 = db.virtual_interface_create(c,
+                {'address': '34:56:78:90:12:34',
+                 'instance_id': instance_id3,
+                 'network_id': 1})
+
+        db.fixed_ip_create(c,
+                {'address': '1.1.1.1',
+                 'instance_id': instance_id1,
+                 'virtual_interface_id': vif_ref1['id']})
+        db.fixed_ip_create(c,
+                {'address': '1.1.2.1',
+                 'instance_id': instance_id2,
+                 'virtual_interface_id': vif_ref2['id']})
+        fix_addr = db.fixed_ip_create(c,
+                {'address': '1.1.3.1',
+                 'instance_id': instance_id3,
+                 'virtual_interface_id': vif_ref3['id']})
+        fix_ref = db.fixed_ip_get_by_address(c, fix_addr)
+        flo_ref = db.floating_ip_create(c,
+                {'address': '10.0.0.2',
+                 'fixed_ip_id': fix_ref['id']})
+
+        # ends up matching 2nd octet here.. so all 3 match
+        instances = self.compute_api.get_all(c,
+                search_opts={'ip': '.*\.1'})
+        self.assertEqual(len(instances), 3)
+
+        instances = self.compute_api.get_all(c,
+                search_opts={'ip': '1.*'})
+        self.assertEqual(len(instances), 3)
+
+        instances = self.compute_api.get_all(c,
+                search_opts={'ip': '.*\.1.\d+$'})
+        self.assertEqual(len(instances), 1)
+        instance_ids = [instance.id for instance in instances]
+        self.assertTrue(instance_id1 in instance_ids)
+
+        instances = self.compute_api.get_all(c,
+                search_opts={'ip': '.*\.2.+'})
+        self.assertEqual(len(instances), 1)
+        self.assertEqual(instances[0].id, instance_id2)
instances = self.compute_api.get_all(c,
|
||||||
|
search_opts={'ip': '10.*'})
|
||||||
|
self.assertEqual(len(instances), 1)
|
||||||
|
self.assertEqual(instances[0].id, instance_id3)
|
||||||
|
|
||||||
|
db.virtual_interface_delete(c, vif_ref1['id'])
|
||||||
|
db.virtual_interface_delete(c, vif_ref2['id'])
|
||||||
|
db.virtual_interface_delete(c, vif_ref3['id'])
|
||||||
|
db.floating_ip_destroy(c, '10.0.0.2')
|
||||||
|
db.instance_destroy(c, instance_id1)
|
||||||
|
db.instance_destroy(c, instance_id2)
|
||||||
|
db.instance_destroy(c, instance_id3)
|
||||||
|
|
||||||
|
def test_get_all_by_ipv6_regexp(self):
|
||||||
|
"""Test searching by IPv6 address"""
|
||||||
|
|
||||||
|
c = context.get_admin_context()
|
||||||
|
instance_id1 = self._create_instance({'display_name': 'woot'})
|
||||||
|
instance_id2 = self._create_instance({
|
||||||
|
'display_name': 'woo',
|
||||||
|
'id': 20})
|
||||||
|
instance_id3 = self._create_instance({
|
||||||
|
'display_name': 'not-woot',
|
||||||
|
'id': 30})
|
||||||
|
|
||||||
|
vif_ref1 = db.virtual_interface_create(c,
|
||||||
|
{'address': '12:34:56:78:90:12',
|
||||||
|
'instance_id': instance_id1,
|
||||||
|
'network_id': 1})
|
||||||
|
vif_ref2 = db.virtual_interface_create(c,
|
||||||
|
{'address': '90:12:34:56:78:90',
|
||||||
|
'instance_id': instance_id2,
|
||||||
|
'network_id': 1})
|
||||||
|
vif_ref3 = db.virtual_interface_create(c,
|
||||||
|
{'address': '34:56:78:90:12:34',
|
||||||
|
'instance_id': instance_id3,
|
||||||
|
'network_id': 1})
|
||||||
|
|
||||||
|
# This will create IPv6 addresses of:
|
||||||
|
# 1: fd00::1034:56ff:fe78:9012
|
||||||
|
# 20: fd00::9212:34ff:fe56:7890
|
||||||
|
# 30: fd00::3656:78ff:fe90:1234
|
||||||
|
|
||||||
|
instances = self.compute_api.get_all(c,
|
||||||
|
search_opts={'ip6': '.*1034.*'})
|
||||||
|
self.assertEqual(len(instances), 1)
|
||||||
|
self.assertEqual(instances[0].id, instance_id1)
|
||||||
|
|
||||||
|
instances = self.compute_api.get_all(c,
|
||||||
|
search_opts={'ip6': '^fd00.*'})
|
||||||
|
self.assertEqual(len(instances), 3)
|
||||||
|
instance_ids = [instance.id for instance in instances]
|
||||||
|
self.assertTrue(instance_id1 in instance_ids)
|
||||||
|
self.assertTrue(instance_id2 in instance_ids)
|
||||||
|
self.assertTrue(instance_id3 in instance_ids)
|
||||||
|
|
||||||
|
instances = self.compute_api.get_all(c,
|
||||||
|
search_opts={'ip6': '^.*12.*34.*'})
|
||||||
|
self.assertEqual(len(instances), 2)
|
||||||
|
instance_ids = [instance.id for instance in instances]
|
||||||
|
self.assertTrue(instance_id2 in instance_ids)
|
||||||
|
self.assertTrue(instance_id3 in instance_ids)
|
||||||
|
|
||||||
|
db.virtual_interface_delete(c, vif_ref1['id'])
|
||||||
|
db.virtual_interface_delete(c, vif_ref2['id'])
|
||||||
|
db.virtual_interface_delete(c, vif_ref3['id'])
|
||||||
|
db.instance_destroy(c, instance_id1)
|
||||||
|
db.instance_destroy(c, instance_id2)
|
||||||
|
db.instance_destroy(c, instance_id3)
|
||||||
|
|
||||||
|
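The three IPv6 addresses asserted in test_get_all_by_ipv6_regexp above are not
arbitrary: they are the EUI-64 expansions of the test VIFs' MAC addresses under
the fd00::/64 prefix. A minimal sketch of that derivation (assuming the usual
EUI-64 rules: flip the universal/local bit of the first octet and splice ff:fe
into the middle; this helper is illustrative only, not the code the tests call)
reproduces them::

    def mac_to_ipv6(mac, prefix='fd00::'):
        """Illustrative sketch of the EUI-64 expansion the comments assume."""
        octets = [int(x, 16) for x in mac.split(':')]
        octets[0] ^= 0x02              # flip the universal/local bit
        eui64 = octets[0:3] + [0xff, 0xfe] + octets[3:6]
        groups = ['%02x%02x' % (eui64[i], eui64[i + 1])
                  for i in range(0, 8, 2)]
        return prefix + ':'.join(groups)

    # mac_to_ipv6('12:34:56:78:90:12') == 'fd00::1034:56ff:fe78:9012'
    # mac_to_ipv6('90:12:34:56:78:90') == 'fd00::9212:34ff:fe56:7890'
    # mac_to_ipv6('34:56:78:90:12:34') == 'fd00::3656:78ff:fe90:1234'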
    def test_get_all_by_multiple_options_at_once(self):
        """Test searching by multiple options at once"""
        c = context.get_admin_context()
        instance_id1 = self._create_instance({'display_name': 'woot'})
        instance_id2 = self._create_instance({
                'display_name': 'woo',
                'id': 20})
        instance_id3 = self._create_instance({
                'display_name': 'not-woot',
                'id': 30})

        vif_ref1 = db.virtual_interface_create(c,
                {'address': '12:34:56:78:90:12',
                 'instance_id': instance_id1,
                 'network_id': 1})
        vif_ref2 = db.virtual_interface_create(c,
                {'address': '90:12:34:56:78:90',
                 'instance_id': instance_id2,
                 'network_id': 1})
        vif_ref3 = db.virtual_interface_create(c,
                {'address': '34:56:78:90:12:34',
                 'instance_id': instance_id3,
                 'network_id': 1})

        db.fixed_ip_create(c,
                {'address': '1.1.1.1',
                 'instance_id': instance_id1,
                 'virtual_interface_id': vif_ref1['id']})
        db.fixed_ip_create(c,
                {'address': '1.1.2.1',
                 'instance_id': instance_id2,
                 'virtual_interface_id': vif_ref2['id']})
        fix_addr = db.fixed_ip_create(c,
                {'address': '1.1.3.1',
                 'instance_id': instance_id3,
                 'virtual_interface_id': vif_ref3['id']})
        fix_ref = db.fixed_ip_get_by_address(c, fix_addr)
        flo_ref = db.floating_ip_create(c,
                {'address': '10.0.0.2',
                 'fixed_ip_id': fix_ref['id']})

        # ip ends up matching 2nd octet here.. so all 3 match ip
        # but 'name' only matches one
        instances = self.compute_api.get_all(c,
                search_opts={'ip': '.*\.1', 'name': 'not.*'})
        self.assertEqual(len(instances), 1)
        self.assertEqual(instances[0].id, instance_id3)

        # ip ends up matching any ip with a '2' in it.. so instance
        # 2 and 3.. but name should only match #2
        instances = self.compute_api.get_all(c,
                search_opts={'ip': '.*2', 'name': '^woo.*'})
        self.assertEqual(len(instances), 1)
        self.assertEqual(instances[0].id, instance_id2)

        # same as above but no match on name (name matches instance_id1
        # but the ip query doesn't)
        instances = self.compute_api.get_all(c,
                search_opts={'ip': '.*2.*', 'name': '^woot.*'})
        self.assertEqual(len(instances), 0)

        # ip matches all 3... ipv6 matches #2+#3...name matches #3
        instances = self.compute_api.get_all(c,
                search_opts={'ip': '.*\.1',
                             'name': 'not.*',
                             'ip6': '^.*12.*34.*'})
        self.assertEqual(len(instances), 1)
        self.assertEqual(instances[0].id, instance_id3)

        db.virtual_interface_delete(c, vif_ref1['id'])
        db.virtual_interface_delete(c, vif_ref2['id'])
        db.virtual_interface_delete(c, vif_ref3['id'])
        db.floating_ip_destroy(c, '10.0.0.2')
        db.instance_destroy(c, instance_id1)
        db.instance_destroy(c, instance_id2)
        db.instance_destroy(c, instance_id3)

    def test_get_all_by_image(self):
        """Test searching instances by image"""

        c = context.get_admin_context()
        instance_id1 = self._create_instance({'image_ref': '1234'})
        instance_id2 = self._create_instance({
                'id': 2,
                'image_ref': '4567'})
        instance_id3 = self._create_instance({
                'id': 10,
                'image_ref': '4567'})

        instances = self.compute_api.get_all(c,
                search_opts={'image': '123'})
        self.assertEqual(len(instances), 0)

        instances = self.compute_api.get_all(c,
                search_opts={'image': '1234'})
        self.assertEqual(len(instances), 1)
        self.assertEqual(instances[0].id, instance_id1)

        instances = self.compute_api.get_all(c,
                search_opts={'image': '4567'})
        self.assertEqual(len(instances), 2)
        instance_ids = [instance.id for instance in instances]
        self.assertTrue(instance_id2 in instance_ids)
        self.assertTrue(instance_id3 in instance_ids)

        # Test passing a list as search arg
        instances = self.compute_api.get_all(c,
                search_opts={'image': ['1234', '4567']})
        self.assertEqual(len(instances), 3)

        db.instance_destroy(c, instance_id1)
        db.instance_destroy(c, instance_id2)
        db.instance_destroy(c, instance_id3)

    def test_get_all_by_flavor(self):
        """Test searching instances by flavor"""

        c = context.get_admin_context()
        instance_id1 = self._create_instance({'instance_type_id': 1})
        instance_id2 = self._create_instance({
                'id': 2,
                'instance_type_id': 2})
        instance_id3 = self._create_instance({
                'id': 10,
                'instance_type_id': 2})

        # NOTE(comstud): Migrations set up the instance_types table
        # for us. Therefore, we assume the following is true for
        # these tests:
        # instance_type_id 1 == flavor 3
        # instance_type_id 2 == flavor 1
        # instance_type_id 3 == flavor 4
        # instance_type_id 4 == flavor 5
        # instance_type_id 5 == flavor 2

        instances = self.compute_api.get_all(c,
                search_opts={'flavor': 5})
        self.assertEqual(len(instances), 0)

        self.assertRaises(exception.FlavorNotFound,
                self.compute_api.get_all,
                c, search_opts={'flavor': 99})

        instances = self.compute_api.get_all(c,
                search_opts={'flavor': 3})
        self.assertEqual(len(instances), 1)
        self.assertEqual(instances[0].id, instance_id1)

        instances = self.compute_api.get_all(c,
                search_opts={'flavor': 1})
        self.assertEqual(len(instances), 2)
        instance_ids = [instance.id for instance in instances]
        self.assertTrue(instance_id2 in instance_ids)
        self.assertTrue(instance_id3 in instance_ids)

        db.instance_destroy(c, instance_id1)
        db.instance_destroy(c, instance_id2)
        db.instance_destroy(c, instance_id3)

    def test_get_all_by_state(self):
        """Test searching instances by state"""

        c = context.get_admin_context()
        instance_id1 = self._create_instance({'state': power_state.SHUTDOWN})
        instance_id2 = self._create_instance({
                'id': 2,
                'state': power_state.RUNNING})
        instance_id3 = self._create_instance({
                'id': 10,
                'state': power_state.RUNNING})

        instances = self.compute_api.get_all(c,
                search_opts={'state': power_state.SUSPENDED})
        self.assertEqual(len(instances), 0)

        instances = self.compute_api.get_all(c,
                search_opts={'state': power_state.SHUTDOWN})
        self.assertEqual(len(instances), 1)
        self.assertEqual(instances[0].id, instance_id1)

        instances = self.compute_api.get_all(c,
                search_opts={'state': power_state.RUNNING})
        self.assertEqual(len(instances), 2)
        instance_ids = [instance.id for instance in instances]
        self.assertTrue(instance_id2 in instance_ids)
        self.assertTrue(instance_id3 in instance_ids)

        # Test passing a list as search arg
        instances = self.compute_api.get_all(c,
                search_opts={'state': [power_state.SHUTDOWN,
                                       power_state.RUNNING]})
        self.assertEqual(len(instances), 3)

        db.instance_destroy(c, instance_id1)
        db.instance_destroy(c, instance_id2)
        db.instance_destroy(c, instance_id3)
    @staticmethod
    def _parse_db_block_device_mapping(bdm_ref):
        attr_list = ('delete_on_termination', 'device_name', 'no_device',

@@ -875,15 +1336,17 @@ class ComputeTestCase(test.TestCase):
        return bdm

    def test_update_block_device_mapping(self):
+        swap_size = 1
+        instance_type = {'swap': swap_size}
        instance_id = self._create_instance()
        mappings = [
            {'virtual': 'ami', 'device': 'sda1'},
            {'virtual': 'root', 'device': '/dev/sda1'},

-            {'virtual': 'swap', 'device': 'sdb1'},
-            {'virtual': 'swap', 'device': 'sdb2'},
-            {'virtual': 'swap', 'device': 'sdb3'},
            {'virtual': 'swap', 'device': 'sdb4'},
+            {'virtual': 'swap', 'device': 'sdb3'},
+            {'virtual': 'swap', 'device': 'sdb2'},
+            {'virtual': 'swap', 'device': 'sdb1'},

            {'virtual': 'ephemeral0', 'device': 'sdc1'},
            {'virtual': 'ephemeral1', 'device': 'sdc2'},

@@ -925,32 +1388,36 @@ class ComputeTestCase(test.TestCase):
             'no_device': True}]

        self.compute_api._update_image_block_device_mapping(
-            self.context, instance_id, mappings)
+            self.context, instance_type, instance_id, mappings)

        bdms = [self._parse_db_block_device_mapping(bdm_ref)
                for bdm_ref in db.block_device_mapping_get_all_by_instance(
                    self.context, instance_id)]
        expected_result = [
-            {'virtual_name': 'swap', 'device_name': '/dev/sdb1'},
-            {'virtual_name': 'swap', 'device_name': '/dev/sdb2'},
-            {'virtual_name': 'swap', 'device_name': '/dev/sdb3'},
-            {'virtual_name': 'swap', 'device_name': '/dev/sdb4'},
+            {'virtual_name': 'swap', 'device_name': '/dev/sdb1',
+             'volume_size': swap_size},
            {'virtual_name': 'ephemeral0', 'device_name': '/dev/sdc1'},
-            {'virtual_name': 'ephemeral1', 'device_name': '/dev/sdc2'},
-            {'virtual_name': 'ephemeral2', 'device_name': '/dev/sdc3'}]
+            # NOTE(yamahata): ATM only ephemeral0 is supported.
+            # they're ignored for now
+            #{'virtual_name': 'ephemeral1', 'device_name': '/dev/sdc2'},
+            #{'virtual_name': 'ephemeral2', 'device_name': '/dev/sdc3'}
+            ]
        bdms.sort()
        expected_result.sort()
        self.assertDictListMatch(bdms, expected_result)

        self.compute_api._update_block_device_mapping(
-            self.context, instance_id, block_device_mapping)
+            self.context, instance_types.get_default_instance_type(),
+            instance_id, block_device_mapping)
        bdms = [self._parse_db_block_device_mapping(bdm_ref)
                for bdm_ref in db.block_device_mapping_get_all_by_instance(
                    self.context, instance_id)]
        expected_result = [
            {'snapshot_id': 0x12345678, 'device_name': '/dev/sda1'},

-            {'virtual_name': 'swap', 'device_name': '/dev/sdb1'},
+            {'virtual_name': 'swap', 'device_name': '/dev/sdb1',
+             'volume_size': swap_size},
            {'snapshot_id': 0x23456789, 'device_name': '/dev/sdb2'},
            {'snapshot_id': 0x3456789A, 'device_name': '/dev/sdb3'},
            {'no_device': True, 'device_name': '/dev/sdb4'},

@@ -972,3 +1439,13 @@ class ComputeTestCase(test.TestCase):
                self.context, instance_id):
            db.block_device_mapping_destroy(self.context, bdm['id'])
        self.compute.terminate_instance(self.context, instance_id)

+    def test_ephemeral_size(self):
+        local_size = 2
+        inst_type = {'local_gb': local_size}
+        self.assertEqual(self.compute_api._ephemeral_size(inst_type,
+                                                          'ephemeral0'),
+                         local_size)
+        self.assertEqual(self.compute_api._ephemeral_size(inst_type,
+                                                          'ephemeral1'),
+                         0)
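The new test_ephemeral_size only pins down two data points, so the helper it
exercises is easy to misread. A hypothetical implementation that is consistent
with those assertions, and with the NOTE(yamahata) comment that only ephemeral0
is honoured for now, would be no more than::

    def _ephemeral_size(instance_type, ephemeral_name):
        # Hypothetical sketch, not the code under test: the first ephemeral
        # disk gets the flavor's local_gb, every other ephemeral disk gets 0.
        if ephemeral_name == 'ephemeral0':
            return instance_type.get('local_gb', 0)
        return 0

The real helper is whatever self.compute_api provides; the sketch is only meant
to make the behaviour the test asserts concrete.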
@@ -19,12 +19,9 @@ Tests For Scheduler Host Filters.
import json

from nova import exception
-from nova import flags
from nova import test
from nova.scheduler import host_filter

-FLAGS = flags.FLAGS
-

class FakeZoneManager:
    pass

@@ -57,9 +54,9 @@ class HostFilterTestCase(test.TestCase):
                'host_name-label': 'xs-%s' % multiplier}

    def setUp(self):
-        self.old_flag = FLAGS.default_host_filter
-        FLAGS.default_host_filter = \
-            'nova.scheduler.host_filter.AllHostsFilter'
+        super(HostFilterTestCase, self).setUp()
+        default_host_filter = 'nova.scheduler.host_filter.AllHostsFilter'
+        self.flags(default_host_filter=default_host_filter)
        self.instance_type = dict(name='tiny',
                memory_mb=50,
                vcpus=10,

@@ -76,9 +73,6 @@ class HostFilterTestCase(test.TestCase):
            states['host%02d' % (x + 1)] = {'compute': self._host_caps(x)}
        self.zone_manager.service_states = states

-    def tearDown(self):
-        FLAGS.default_host_filter = self.old_flag
-
    def test_choose_filter(self):
        # Test default filter ...
        hf = host_filter.choose_host_filter()
@@ -48,6 +48,10 @@ def stub_set_host_enabled(context, host, enabled):
    return status


+def stub_host_power_action(context, host, action):
+    return action
+
+
class FakeRequest(object):
    environ = {"nova.context": context.get_admin_context()}

@@ -62,6 +66,8 @@ class HostTestCase(test.TestCase):
        self.stubs.Set(scheduler_api, 'get_host_list', stub_get_host_list)
        self.stubs.Set(self.controller.compute_api, 'set_host_enabled',
                       stub_set_host_enabled)
+        self.stubs.Set(self.controller.compute_api, 'host_power_action',
+                       stub_host_power_action)

    def test_list_hosts(self):
        """Verify that the compute hosts are returned."""

@@ -87,6 +93,18 @@ class HostTestCase(test.TestCase):
        result_c2 = self.controller.update(self.req, "host_c2", body=en_body)
        self.assertEqual(result_c2["status"], "disabled")

+    def test_host_startup(self):
+        result = self.controller.startup(self.req, "host_c1")
+        self.assertEqual(result["power_action"], "startup")
+
+    def test_host_shutdown(self):
+        result = self.controller.shutdown(self.req, "host_c1")
+        self.assertEqual(result["power_action"], "shutdown")
+
+    def test_host_reboot(self):
+        result = self.controller.reboot(self.req, "host_c1")
+        self.assertEqual(result["power_action"], "reboot")
+
    def test_bad_status_value(self):
        bad_body = {"status": "bad"}
        self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update,
134  nova/tests/test_image.py  Normal file
@@ -0,0 +1,134 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright 2011 OpenStack LLC
# Author: Soren Hansen
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import datetime

from nova import context
from nova import exception
from nova import test
import nova.image


class _ImageTestCase(test.TestCase):
    def setUp(self):
        super(_ImageTestCase, self).setUp()
        self.context = context.get_admin_context()

    def test_index(self):
        res = self.image_service.index(self.context)
        for image in res:
            self.assertEquals(set(image.keys()), set(['id', 'name']))

    def test_detail(self):
        res = self.image_service.detail(self.context)
        for image in res:
            keys = set(image.keys())
            self.assertEquals(keys, set(['id', 'name', 'created_at',
                                         'updated_at', 'deleted_at', 'deleted',
                                         'status', 'is_public', 'properties']))
            self.assertTrue(isinstance(image['created_at'], datetime.datetime))
            self.assertTrue(isinstance(image['updated_at'], datetime.datetime))

            if not (isinstance(image['deleted_at'], datetime.datetime) or
                    image['deleted_at'] is None):
                self.fail('image\'s "deleted_at" attribute was neither a '
                          'datetime object nor None')

            def check_is_bool(image, key):
                val = image.get('deleted')
                if not isinstance(val, bool):
                    self.fail('image\'s "%s" attribute wasn\'t '
                              'a bool: %r' % (key, val))

            check_is_bool(image, 'deleted')
            check_is_bool(image, 'is_public')

    def test_index_and_detail_have_same_results(self):
        index = self.image_service.index(self.context)
        detail = self.image_service.detail(self.context)
        index_set = set([(i['id'], i['name']) for i in index])
        detail_set = set([(i['id'], i['name']) for i in detail])
        self.assertEqual(index_set, detail_set)

    def test_show_raises_imagenotfound_for_invalid_id(self):
        self.assertRaises(exception.ImageNotFound,
                          self.image_service.show,
                          self.context,
                          'this image does not exist')

    def test_show_by_name(self):
        self.assertRaises(exception.ImageNotFound,
                          self.image_service.show_by_name,
                          self.context,
                          'this image does not exist')

    def test_create_adds_id(self):
        index = self.image_service.index(self.context)
        image_count = len(index)

        self.image_service.create(self.context, {})

        index = self.image_service.index(self.context)
        self.assertEquals(len(index), image_count + 1)

        self.assertTrue(index[0]['id'])

    def test_create_keeps_id(self):
        self.image_service.create(self.context, {'id': '34'})
        self.image_service.show(self.context, '34')

    def test_create_rejects_duplicate_ids(self):
        self.image_service.create(self.context, {'id': '34'})
        self.assertRaises(exception.Duplicate,
                          self.image_service.create,
                          self.context,
                          {'id': '34'})

        # Make sure there's still one left
        self.image_service.show(self.context, '34')

    def test_update(self):
        self.image_service.create(self.context,
                                  {'id': '34', 'foo': 'bar'})

        self.image_service.update(self.context, '34',
                                  {'id': '34', 'foo': 'baz'})

        img = self.image_service.show(self.context, '34')
        self.assertEquals(img['foo'], 'baz')

    def test_delete(self):
        self.image_service.create(self.context, {'id': '34', 'foo': 'bar'})
        self.image_service.delete(self.context, '34')
        self.assertRaises(exception.NotFound,
                          self.image_service.show,
                          self.context,
                          '34')

    def test_delete_all(self):
        self.image_service.create(self.context, {'id': '32', 'foo': 'bar'})
        self.image_service.create(self.context, {'id': '33', 'foo': 'bar'})
        self.image_service.create(self.context, {'id': '34', 'foo': 'bar'})
        self.image_service.delete_all()
        index = self.image_service.index(self.context)
        self.assertEquals(len(index), 0)


class FakeImageTestCase(_ImageTestCase):
    def setUp(self):
        super(FakeImageTestCase, self).setUp()
        self.image_service = nova.image.fake.FakeImageService()
@@ -38,7 +38,6 @@ from nova.virt.libvirt import firewall

libvirt = None
FLAGS = flags.FLAGS
-flags.DECLARE('instances_path', 'nova.compute.manager')


def _concurrency(wait, done, target):

@@ -93,6 +92,7 @@ def _setup_networking(instance_id, ip='1.2.3.4'):
class CacheConcurrencyTestCase(test.TestCase):
    def setUp(self):
        super(CacheConcurrencyTestCase, self).setUp()
+        self.flags(instances_path='nova.compute.manager')

        def fake_exists(fname):
            basedir = os.path.join(FLAGS.instances_path, '_base')

@@ -158,7 +158,7 @@ class LibvirtConnTestCase(test.TestCase):
        self.context = context.RequestContext(self.user_id, self.project_id)
        self.network = utils.import_object(FLAGS.network_manager)
        self.context = context.get_admin_context()
-        FLAGS.instances_path = ''
+        self.flags(instances_path='')
        self.call_libvirt_dependant_setup = False
        self.test_ip = '10.11.12.13'

@@ -169,6 +169,7 @@ class LibvirtConnTestCase(test.TestCase):
                    'project_id': 'fake',
                    'bridge': 'br101',
                    'image_ref': '123456',
+                    'local_gb': 20,
                    'instance_type_id': '5'}  # m1.small

    def lazy_load_library_exists(self):

@@ -322,7 +323,7 @@ class LibvirtConnTestCase(test.TestCase):
        if not self.lazy_load_library_exists():
            return

-        FLAGS.image_service = 'nova.image.fake.FakeImageService'
+        self.flags(image_service='nova.image.fake.FakeImageService')

        # Start test
        image_service = utils.import_object(FLAGS.image_service)

@@ -357,7 +358,7 @@ class LibvirtConnTestCase(test.TestCase):
        if not self.lazy_load_library_exists():
            return

-        FLAGS.image_service = 'nova.image.fake.FakeImageService'
+        self.flags(image_service='nova.image.fake.FakeImageService')

        # Start test
        image_service = utils.import_object(FLAGS.image_service)

@@ -521,7 +522,7 @@ class LibvirtConnTestCase(test.TestCase):
                                      'disk.local')]

        for (libvirt_type, (expected_uri, checks)) in type_uri_map.iteritems():
-            FLAGS.libvirt_type = libvirt_type
+            self.flags(libvirt_type=libvirt_type)
            conn = connection.LibvirtConnection(True)

            uri = conn.get_uri()

@@ -546,9 +547,9 @@ class LibvirtConnTestCase(test.TestCase):
        # checking against that later on. This way we make sure the
        # implementation doesn't fiddle around with the FLAGS.
        testuri = 'something completely different'
-        FLAGS.libvirt_uri = testuri
+        self.flags(libvirt_uri=testuri)
        for (libvirt_type, (expected_uri, checks)) in type_uri_map.iteritems():
-            FLAGS.libvirt_type = libvirt_type
+            self.flags(libvirt_type=libvirt_type)
            conn = connection.LibvirtConnection(True)
            uri = conn.get_uri()
            self.assertEquals(uri, testuri)

@@ -556,8 +557,7 @@ class LibvirtConnTestCase(test.TestCase):

    def test_update_available_resource_works_correctly(self):
        """Confirm compute_node table is updated successfully."""
-        org_path = FLAGS.instances_path = ''
-        FLAGS.instances_path = '.'
+        self.flags(instances_path='.')

        # Prepare mocks
        def getVersion():

@@ -604,12 +604,10 @@ class LibvirtConnTestCase(test.TestCase):
        self.assertTrue(compute_node['hypervisor_version'] > 0)

        db.service_destroy(self.context, service_ref['id'])
-        FLAGS.instances_path = org_path

    def test_update_resource_info_no_compute_record_found(self):
        """Raise exception if no record found on services table."""
-        org_path = FLAGS.instances_path = ''
-        FLAGS.instances_path = '.'
+        self.flags(instances_path='.')
        self.create_fake_libvirt_mock()

        self.mox.ReplayAll()

@@ -618,8 +616,6 @@ class LibvirtConnTestCase(test.TestCase):
                          conn.update_available_resource,
                          self.context, 'dummy')

-        FLAGS.instances_path = org_path

    def test_ensure_filtering_rules_for_instance_timeout(self):
        """ensure_filtering_rules_for_instance() finishes with timeout."""
        # Skip if non-libvirt environment

@@ -749,6 +745,42 @@ class LibvirtConnTestCase(test.TestCase):
        ip = conn.get_host_ip_addr()
        self.assertEquals(ip, FLAGS.my_ip)

+    def test_volume_in_mapping(self):
+        conn = connection.LibvirtConnection(False)
+        swap = {'device_name': '/dev/sdb',
+                'swap_size': 1}
+        ephemerals = [{'num': 0,
+                       'virtual_name': 'ephemeral0',
+                       'device_name': '/dev/sdc1',
+                       'size': 1},
+                      {'num': 2,
+                       'virtual_name': 'ephemeral2',
+                       'device_name': '/dev/sdd',
+                       'size': 1}]
+        block_device_mapping = [{'mount_device': '/dev/sde',
+                                 'device_path': 'fake_device'},
+                                {'mount_device': '/dev/sdf',
+                                 'device_path': 'fake_device'}]
+        block_device_info = {
+            'root_device_name': '/dev/sda',
+            'swap': swap,
+            'ephemerals': ephemerals,
+            'block_device_mapping': block_device_mapping}
+
+        def _assert_volume_in_mapping(device_name, true_or_false):
+            self.assertEquals(conn._volume_in_mapping(device_name,
+                                                      block_device_info),
+                              true_or_false)
+
+        _assert_volume_in_mapping('sda', False)
+        _assert_volume_in_mapping('sdb', True)
+        _assert_volume_in_mapping('sdc1', True)
+        _assert_volume_in_mapping('sdd', True)
+        _assert_volume_in_mapping('sde', True)
+        _assert_volume_in_mapping('sdf', True)
+        _assert_volume_in_mapping('sdg', False)
+        _assert_volume_in_mapping('sdh1', False)


class NWFilterFakes:
    def __init__(self):
@@ -889,18 +921,18 @@ class IptablesFirewallTestCase(test.TestCase):
        # self.fw.add_instance(instance_ref)
        def fake_iptables_execute(*cmd, **kwargs):
            process_input = kwargs.get('process_input', None)
-            if cmd == ('sudo', 'ip6tables-save', '-t', 'filter'):
+            if cmd == ('ip6tables-save', '-t', 'filter'):
                return '\n'.join(self.in6_filter_rules), None
-            if cmd == ('sudo', 'iptables-save', '-t', 'filter'):
+            if cmd == ('iptables-save', '-t', 'filter'):
                return '\n'.join(self.in_filter_rules), None
-            if cmd == ('sudo', 'iptables-save', '-t', 'nat'):
+            if cmd == ('iptables-save', '-t', 'nat'):
                return '\n'.join(self.in_nat_rules), None
-            if cmd == ('sudo', 'iptables-restore'):
+            if cmd == ('iptables-restore',):
                lines = process_input.split('\n')
                if '*filter' in lines:
                    self.out_rules = lines
                return '', ''
-            if cmd == ('sudo', 'ip6tables-restore'):
+            if cmd == ('ip6tables-restore',):
                lines = process_input.split('\n')
                if '*filter' in lines:
                    self.out6_rules = lines
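The leading 'sudo' disappears from every stubbed command tuple above because
this merge switches callers to the run_as_root=True convention: the command is
passed unprefixed and privilege escalation becomes a keyword argument to the
execute helper (the ISCSI and XenAPI hunks further down make the same change).
A hedged sketch of a caller after the switch, assuming nova.utils.execute is
the wrapper these tests stub::

    from nova import utils

    # The wrapper decides how to escalate privileges; the caller no longer
    # spells out 'sudo' in the argument list.
    out, err = utils.execute('iptables-save', '-t', 'filter',
                             run_as_root=True)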
@@ -1162,8 +1194,11 @@ class NWFilterTestCase(test.TestCase):
                                       'project_id': 'fake',
                                       'instance_type_id': 1})

-    def _create_instance_type(self, params={}):
+    def _create_instance_type(self, params=None):
        """Create a test instance"""
+        if not params:
+            params = {}
+
        context = self.context.elevated()
        inst = {}
        inst['name'] = 'm1.small'
@@ -17,7 +17,6 @@

from nova import db
from nova import exception
-from nova import flags
from nova import log as logging
from nova import test
from nova.network import manager as network_manager

@@ -26,7 +25,6 @@ from nova.network import manager as network_manager
import mox

-FLAGS = flags.FLAGS
LOG = logging.getLogger('nova.tests.network')
@@ -20,13 +20,11 @@ Unit Tests for remote procedure calls using queue
"""

from nova import context
-from nova import flags
from nova import log as logging
from nova import rpc
from nova import test

-FLAGS = flags.FLAGS
LOG = logging.getLogger('nova.tests.rpc')

@@ -1,12 +1,32 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright (c) 2010 Openstack, LLC.
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Tests For RPC AMQP.
+"""
+
from nova import context
-from nova import flags
from nova import log as logging
from nova import rpc
from nova.rpc import amqp
from nova import test

-FLAGS = flags.FLAGS
LOG = logging.getLogger('nova.tests.rpc')
47  nova/tests/test_skip_examples.py  Normal file
@@ -0,0 +1,47 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from nova import test


class ExampleSkipTestCase(test.TestCase):
    test_counter = 0

    @test.skip_test("Example usage of @test.skip_test()")
    def test_skip_test_example(self):
        self.fail("skip_test failed to work properly.")

    @test.skip_if(True, "Example usage of @test.skip_if()")
    def test_skip_if_example(self):
        self.fail("skip_if failed to work properly.")

    @test.skip_unless(False, "Example usage of @test.skip_unless()")
    def test_skip_unless_example(self):
        self.fail("skip_unless failed to work properly.")

    @test.skip_if(False, "This test case should never be skipped.")
    def test_001_increase_test_counter(self):
        ExampleSkipTestCase.test_counter += 1

    @test.skip_unless(True, "This test case should never be skipped.")
    def test_002_increase_test_counter(self):
        ExampleSkipTestCase.test_counter += 1

    def test_003_verify_test_counter(self):
        self.assertEquals(ExampleSkipTestCase.test_counter, 2,
                          "Tests were not skipped appropriately")
83  nova/tests/test_virt.py  Normal file
@@ -0,0 +1,83 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright 2011 Isaku Yamahata
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from nova import flags
from nova import test
from nova.virt import driver

FLAGS = flags.FLAGS


class TestVirtDriver(test.TestCase):
    def test_block_device(self):
        swap = {'device_name': '/dev/sdb',
                'swap_size': 1}
        ephemerals = [{'num': 0,
                       'virtual_name': 'ephemeral0',
                       'device_name': '/dev/sdc1',
                       'size': 1}]
        block_device_mapping = [{'mount_device': '/dev/sde',
                                 'device_path': 'fake_device'}]
        block_device_info = {
            'root_device_name': '/dev/sda',
            'swap': swap,
            'ephemerals': ephemerals,
            'block_device_mapping': block_device_mapping}

        empty_block_device_info = {}

        self.assertEqual(
            driver.block_device_info_get_root(block_device_info), '/dev/sda')
        self.assertEqual(
            driver.block_device_info_get_root(empty_block_device_info), None)
        self.assertEqual(
            driver.block_device_info_get_root(None), None)

        self.assertEqual(
            driver.block_device_info_get_swap(block_device_info), swap)
        self.assertEqual(driver.block_device_info_get_swap(
            empty_block_device_info)['device_name'], None)
        self.assertEqual(driver.block_device_info_get_swap(
            empty_block_device_info)['swap_size'], 0)
        self.assertEqual(
            driver.block_device_info_get_swap({'swap': None})['device_name'],
            None)
        self.assertEqual(
            driver.block_device_info_get_swap({'swap': None})['swap_size'],
            0)
        self.assertEqual(
            driver.block_device_info_get_swap(None)['device_name'], None)
        self.assertEqual(
            driver.block_device_info_get_swap(None)['swap_size'], 0)

        self.assertEqual(
            driver.block_device_info_get_ephemerals(block_device_info),
            ephemerals)
        self.assertEqual(
            driver.block_device_info_get_ephemerals(empty_block_device_info),
            [])
        self.assertEqual(
            driver.block_device_info_get_ephemerals(None),
            [])

    def test_swap_is_usable(self):
        self.assertFalse(driver.swap_is_usable(None))
        self.assertFalse(driver.swap_is_usable({'device_name': None}))
        self.assertFalse(driver.swap_is_usable({'device_name': '/dev/sdb',
                                                'swap_size': 0}))
        self.assertTrue(driver.swap_is_usable({'device_name': '/dev/sdb',
                                               'swap_size': 1}))
@@ -414,8 +414,9 @@ class ISCSITestCase(DriverTestCase):
        self.mox.StubOutWithMock(self.volume.driver, '_execute')
        for i in volume_id_list:
            tid = db.volume_get_iscsi_target_num(self.context, i)
-            self.volume.driver._execute("sudo", "ietadm", "--op", "show",
-                                         "--tid=%(tid)d" % locals())
+            self.volume.driver._execute("ietadm", "--op", "show",
+                                        "--tid=%(tid)d" % locals(),
+                                        run_as_root=True)

        self.stream.truncate(0)
        self.mox.ReplayAll()

@@ -433,8 +434,9 @@ class ISCSITestCase(DriverTestCase):
        # the first vblade process isn't running
        tid = db.volume_get_iscsi_target_num(self.context, volume_id_list[0])
        self.mox.StubOutWithMock(self.volume.driver, '_execute')
-        self.volume.driver._execute("sudo", "ietadm", "--op", "show",
-                                    "--tid=%(tid)d" % locals()).AndRaise(
+        self.volume.driver._execute("ietadm", "--op", "show",
+                                    "--tid=%(tid)d" % locals(),
+                                    run_as_root=True).AndRaise(
            exception.ProcessExecutionError())

        self.mox.ReplayAll()
@@ -71,9 +71,9 @@ class XenAPIVolumeTestCase(test.TestCase):
|
|||||||
self.user_id = 'fake'
|
self.user_id = 'fake'
|
||||||
self.project_id = 'fake'
|
self.project_id = 'fake'
|
||||||
self.context = context.RequestContext(self.user_id, self.project_id)
|
self.context = context.RequestContext(self.user_id, self.project_id)
|
||||||
FLAGS.target_host = '127.0.0.1'
|
self.flags(target_host='127.0.0.1',
|
||||||
FLAGS.xenapi_connection_url = 'test_url'
|
xenapi_connection_url='test_url',
|
||||||
FLAGS.xenapi_connection_password = 'test_pass'
|
xenapi_connection_password='test_pass')
|
||||||
db_fakes.stub_out_db_instance_api(self.stubs)
|
db_fakes.stub_out_db_instance_api(self.stubs)
|
||||||
stubs.stub_out_get_target(self.stubs)
|
stubs.stub_out_get_target(self.stubs)
|
||||||
xenapi_fake.reset()
|
xenapi_fake.reset()
|
||||||
@@ -170,6 +170,10 @@ def reset_network(*args):
|
|||||||
pass
|
pass
|
||||||
|
|
||||||
|
|
||||||
|
def _find_rescue_vbd_ref(*args):
|
||||||
|
pass
|
||||||
|
|
||||||
|
|
||||||
class XenAPIVMTestCase(test.TestCase):
|
class XenAPIVMTestCase(test.TestCase):
|
||||||
"""Unit tests for VM operations."""
|
"""Unit tests for VM operations."""
|
||||||
def setUp(self):
|
def setUp(self):
|
||||||
@@ -189,6 +193,8 @@ class XenAPIVMTestCase(test.TestCase):
|
|||||||
stubs.stubout_stream_disk(self.stubs)
|
stubs.stubout_stream_disk(self.stubs)
|
||||||
stubs.stubout_is_vdi_pv(self.stubs)
|
stubs.stubout_is_vdi_pv(self.stubs)
|
||||||
self.stubs.Set(vmops.VMOps, 'reset_network', reset_network)
|
self.stubs.Set(vmops.VMOps, 'reset_network', reset_network)
|
||||||
|
self.stubs.Set(vmops.VMOps, '_find_rescue_vbd_ref',
|
||||||
|
_find_rescue_vbd_ref)
|
||||||
stubs.stub_out_vm_methods(self.stubs)
|
stubs.stub_out_vm_methods(self.stubs)
|
||||||
glance_stubs.stubout_glance_client(self.stubs)
|
glance_stubs.stubout_glance_client(self.stubs)
|
||||||
fake_utils.stub_out_utils_execute(self.stubs)
|
fake_utils.stub_out_utils_execute(self.stubs)
|
||||||
@@ -397,7 +403,7 @@ class XenAPIVMTestCase(test.TestCase):
|
|||||||
instance_type_id="3", os_type="linux",
|
instance_type_id="3", os_type="linux",
|
||||||
architecture="x86-64", instance_id=1,
|
architecture="x86-64", instance_id=1,
|
||||||
check_injection=False,
|
check_injection=False,
|
||||||
create_record=True):
|
create_record=True, empty_dns=False):
|
||||||
stubs.stubout_loopingcall_start(self.stubs)
|
stubs.stubout_loopingcall_start(self.stubs)
|
||||||
if create_record:
|
if create_record:
|
||||||
values = {'id': instance_id,
|
values = {'id': instance_id,
|
||||||
@@ -426,12 +432,22 @@ class XenAPIVMTestCase(test.TestCase):
|
|||||||
'label': 'fake',
|
'label': 'fake',
|
||||||
'mac': 'DE:AD:BE:EF:00:00',
|
'mac': 'DE:AD:BE:EF:00:00',
|
||||||
'rxtx_cap': 3})]
|
'rxtx_cap': 3})]
|
||||||
|
if empty_dns:
|
||||||
|
network_info[0][1]['dns'] = []
|
||||||
|
|
||||||
self.conn.spawn(self.context, instance, network_info)
|
self.conn.spawn(self.context, instance, network_info)
|
||||||
self.create_vm_record(self.conn, os_type, instance_id)
|
self.create_vm_record(self.conn, os_type, instance_id)
|
||||||
self.check_vm_record(self.conn, check_injection)
|
self.check_vm_record(self.conn, check_injection)
|
||||||
self.assertTrue(instance.os_type)
|
self.assertTrue(instance.os_type)
|
||||||
self.assertTrue(instance.architecture)
|
self.assertTrue(instance.architecture)
|
||||||
|
|
||||||
|
def test_spawn_empty_dns(self):
|
||||||
|
""""Test spawning with an empty dns list"""
|
||||||
|
self._test_spawn(glance_stubs.FakeGlance.IMAGE_VHD, None, None,
|
||||||
|
os_type="linux", architecture="x86-64",
|
||||||
|
empty_dns=True)
|
||||||
|
self.check_vm_params_for_linux()
|
||||||
|
|
||||||
def test_spawn_not_enough_memory(self):
|
def test_spawn_not_enough_memory(self):
|
||||||
self.assertRaises(Exception,
|
self.assertRaises(Exception,
|
||||||
self._test_spawn,
|
self._test_spawn,
|
||||||
@@ -532,8 +548,8 @@ class XenAPIVMTestCase(test.TestCase):
|
|||||||
return '', ''
|
return '', ''
|
||||||
|
|
||||||
fake_utils.fake_execute_set_repliers([
|
fake_utils.fake_execute_set_repliers([
|
||||||
# Capture the sudo tee .../etc/network/interfaces command
|
# Capture the tee .../etc/network/interfaces command
|
||||||
(r'(sudo\s+)?tee.*interfaces', _tee_handler),
|
(r'tee.*interfaces', _tee_handler),
|
||||||
])
|
])
|
||||||
self._test_spawn(glance_stubs.FakeGlance.IMAGE_MACHINE,
|
self._test_spawn(glance_stubs.FakeGlance.IMAGE_MACHINE,
|
||||||
glance_stubs.FakeGlance.IMAGE_KERNEL,
|
glance_stubs.FakeGlance.IMAGE_KERNEL,
|
||||||
@@ -576,9 +592,9 @@ class XenAPIVMTestCase(test.TestCase):
|
|||||||
return '', ''
|
return '', ''
|
||||||
|
|
||||||
fake_utils.fake_execute_set_repliers([
|
fake_utils.fake_execute_set_repliers([
|
||||||
(r'(sudo\s+)?mount', _mount_handler),
|
(r'mount', _mount_handler),
|
||||||
(r'(sudo\s+)?umount', _umount_handler),
|
(r'umount', _umount_handler),
|
||||||
(r'(sudo\s+)?tee.*interfaces', _tee_handler)])
|
(r'tee.*interfaces', _tee_handler)])
|
||||||
self._test_spawn(1, 2, 3, check_injection=True)
|
self._test_spawn(1, 2, 3, check_injection=True)
|
||||||
|
|
||||||
# tee must not run in this case, where an injection-capable
|
# tee must not run in this case, where an injection-capable
|
||||||
@@ -638,6 +654,24 @@ class XenAPIVMTestCase(test.TestCase):
         # Ensure that it will not unrescue a non-rescued instance.
         self.assertRaises(Exception, conn.unrescue, instance, None)

+    def test_revert_migration(self):
+        instance = self._create_instance()
+
+        class VMOpsMock():
+
+            def __init__(self):
+                self.revert_migration_called = False
+
+            def revert_migration(self, instance):
+                self.revert_migration_called = True
+
+        stubs.stubout_session(self.stubs, stubs.FakeSessionForMigrationTests)
+
+        conn = xenapi_conn.get_connection(False)
+        conn._vmops = VMOpsMock()
+        conn.revert_migration(instance)
+        self.assertTrue(conn._vmops.revert_migration_called)
+
     def _create_instance(self, instance_id=1, spawn=True):
         """Creates and spawns a test instance."""
         stubs.stubout_loopingcall_start(self.stubs)
@@ -719,9 +753,9 @@ class XenAPIMigrateInstance(test.TestCase):
     def setUp(self):
         super(XenAPIMigrateInstance, self).setUp()
         self.stubs = stubout.StubOutForTesting()
-        FLAGS.target_host = '127.0.0.1'
-        FLAGS.xenapi_connection_url = 'test_url'
-        FLAGS.xenapi_connection_password = 'test_pass'
+        self.flags(target_host='127.0.0.1',
+                   xenapi_connection_url='test_url',
+                   xenapi_connection_password='test_pass')
         db_fakes.stub_out_db_instance_api(self.stubs)
         stubs.stub_out_get_target(self.stubs)
         xenapi_fake.reset()
@@ -751,15 +785,26 @@ class XenAPIMigrateInstance(test.TestCase):
         conn = xenapi_conn.get_connection(False)
         conn.migrate_disk_and_power_off(instance, '127.0.0.1')

-    def test_finish_migrate(self):
+    def test_revert_migrate(self):
         instance = db.instance_create(self.context, self.values)
         self.called = False
+        self.fake_vm_start_called = False
+        self.fake_revert_migration_called = False
+
+        def fake_vm_start(*args, **kwargs):
+            self.fake_vm_start_called = True

         def fake_vdi_resize(*args, **kwargs):
             self.called = True

+        def fake_revert_migration(*args, **kwargs):
+            self.fake_revert_migration_called = True
+
         self.stubs.Set(stubs.FakeSessionForMigrationTests,
                 "VDI_resize_online", fake_vdi_resize)
+        self.stubs.Set(vmops.VMOps, '_start', fake_vm_start)
+        self.stubs.Set(vmops.VMOps, 'revert_migration', fake_revert_migration)
+
         stubs.stubout_session(self.stubs, stubs.FakeSessionForMigrationTests)
         stubs.stubout_loopingcall_start(self.stubs)
         conn = xenapi_conn.get_connection(False)
@@ -781,6 +826,48 @@ class XenAPIMigrateInstance(test.TestCase):
                               dict(base_copy='hurr', cow='durr'),
                               network_info, resize_instance=True)
         self.assertEqual(self.called, True)
+        self.assertEqual(self.fake_vm_start_called, True)
+
+        conn.revert_migration(instance)
+        self.assertEqual(self.fake_revert_migration_called, True)
+
+    def test_finish_migrate(self):
+        instance = db.instance_create(self.context, self.values)
+        self.called = False
+        self.fake_vm_start_called = False
+
+        def fake_vm_start(*args, **kwargs):
+            self.fake_vm_start_called = True
+
+        def fake_vdi_resize(*args, **kwargs):
+            self.called = True
+
+        self.stubs.Set(stubs.FakeSessionForMigrationTests,
+                "VDI_resize_online", fake_vdi_resize)
+        self.stubs.Set(vmops.VMOps, '_start', fake_vm_start)
+
+        stubs.stubout_session(self.stubs, stubs.FakeSessionForMigrationTests)
+        stubs.stubout_loopingcall_start(self.stubs)
+        conn = xenapi_conn.get_connection(False)
+        network_info = [({'bridge': 'fa0', 'id': 0, 'injected': False},
+                          {'broadcast': '192.168.0.255',
+                           'dns': ['192.168.0.1'],
+                           'gateway': '192.168.0.1',
+                           'gateway6': 'dead:beef::1',
+                           'ip6s': [{'enabled': '1',
+                                     'ip': 'dead:beef::dcad:beff:feef:0',
+                                     'netmask': '64'}],
+                           'ips': [{'enabled': '1',
+                                    'ip': '192.168.0.100',
+                                    'netmask': '255.255.255.0'}],
+                           'label': 'fake',
+                           'mac': 'DE:AD:BE:EF:00:00',
+                           'rxtx_cap': 3})]
+        conn.finish_migration(self.context, instance,
+                              dict(base_copy='hurr', cow='durr'),
+                              network_info, resize_instance=True)
+        self.assertEqual(self.called, True)
+        self.assertEqual(self.fake_vm_start_called, True)

     def test_finish_migrate_no_local_storage(self):
         tiny_type_id = \

@@ -18,7 +18,6 @@ Tests For ZoneManager

 import datetime
 import mox
-import novaclient

 from nova import context
 from nova import db