merged trunk changes
.mailmap (+2)
@@ -16,6 +16,8 @@
 <jmckenty@gmail.com> <jmckenty@joshua-mckentys-macbook-pro.local>
 <jmckenty@gmail.com> <joshua.mckenty@nasa.gov>
 <justin@fathomdb.com> <justinsb@justinsb-desktop>
+<masumotok@nttdata.co.jp> <root@openstack2-api>
+<masumotok@nttdata.co.jp> Masumoto<masumotok@nttdata.co.jp>
 <mordred@inaugust.com> <mordred@hudson>
 <paul@openstack.org> <pvoccio@castor.local>
 <paul@openstack.org> <paul.voccio@rackspace.com>
Authors (+2)
@@ -26,6 +26,7 @@ Josh Durgin <joshd@hq.newdream.net>
 Josh Kearney <josh.kearney@rackspace.com>
 Joshua McKenty <jmckenty@gmail.com>
 Justin Santa Barbara <justin@fathomdb.com>
+Kei Masumoto <masumotok@nttdata.co.jp>
 Ken Pepple <ken.pepple@gmail.com>
 Koji Iida <iida.koji@lab.ntt.co.jp>
 Lorin Hochstein <lorin@isi.edu>
@@ -34,6 +35,7 @@ Michael Gundlach <michael.gundlach@rackspace.com>
 Monsyne Dragon <mdragon@rackspace.com>
 Monty Taylor <mordred@inaugust.com>
 MORITA Kazutaka <morita.kazutaka@gmail.com>
+Muneyuki Noguchi <noguchimn@nttdata.co.jp>
 Nachi Ueno <ueno.nachi@lab.ntt.co.jp> <openstack@lab.ntt.co.jp> <nati.ueno@gmail.com> <nova@u4>
 Paul Voccio <paul@openstack.org>
 Rick Clark <rick@openstack.org>
MANIFEST.in (+1)
@@ -12,6 +12,7 @@ include nova/cloudpipe/bootscript.sh
 include nova/cloudpipe/client.ovpn.template
 include nova/compute/fakevirtinstance.xml
 include nova/compute/interfaces.template
+include nova/db/sqlalchemy/migrate_repo/migrate.cfg
 include nova/virt/interfaces.template
 include nova/virt/libvirt*.xml.template
 include nova/tests/CA/
bin/nova-manage
@@ -82,6 +82,7 @@ from nova import quota
 from nova import utils
 from nova.auth import manager
 from nova.cloudpipe import pipelib
+from nova.db import migration


 logging.basicConfig()
@@ -519,6 +520,21 @@ class LogCommands(object):
         print re.sub('#012', "\n", "\n".join(lines))


+class DbCommands(object):
+    """Class for managing the database."""
+
+    def __init__(self):
+        pass
+
+    def sync(self, version=None):
+        """Sync the database up to the most recent version."""
+        return migration.db_sync(version)
+
+    def version(self):
+        """Print the current database version."""
+        print migration.db_version()
+
+
 CATEGORIES = [
     ('user', UserCommands),
     ('project', ProjectCommands),
@@ -528,7 +544,8 @@ CATEGORIES = [
     ('floating', FloatingIpCommands),
     ('network', NetworkCommands),
     ('service', ServiceCommands),
-    ('log', LogCommands)]
+    ('log', LogCommands),
+    ('db', DbCommands)]


 def lazy_match(name, key_value_tuples):
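The new db category gives operators "nova-manage db sync [version]" and "nova-manage db version", both thin wrappers over nova.db.migration. A plausible sketch of that module, assuming it wraps sqlalchemy-migrate's versioning API; the helper name _repo_path and the exact repository layout are assumptions for illustration, not taken from this diff:

    # hypothetical sketch of nova/db/migration.py
    import os

    from migrate.versioning import api as versioning_api

    from nova import flags

    FLAGS = flags.FLAGS


    def _repo_path():
        # the migrate_repo directory whose migrate.cfg is added to MANIFEST.in above
        return os.path.join(os.path.dirname(__file__), 'sqlalchemy', 'migrate_repo')


    def db_sync(version=None):
        # upgrade the schema to `version`, or to the latest version when None
        return versioning_api.upgrade(FLAGS.sql_connection, _repo_path(), version)


    def db_version():
        # schema version recorded by sqlalchemy-migrate in the database
        return versioning_api.db_version(FLAGS.sql_connection, _repo_path())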
nova/service.py
@@ -209,20 +209,6 @@ class Service(object):
                 self.model_disconnected = True
                 logging.exception(_("model server went away"))

-try:
-    # NOTE(vish): This is late-loaded to make sure that the
-    #             database is not created before flags have
-    #             been loaded.
-    from nova.db.sqlalchemy import models
-    models.register_models()
-except OperationalError:
-    fl_conn = FLAGS.sql_connection
-    fl_intv = FLAGS.sql_retry_interval
-    logging.exception(_("Data store %(fl_conn)s is"
-                        " unreachable. Trying again in %(fl_intv)d"
-                        " seconds.") % locals())
-    time.sleep(FLAGS.sql_retry_interval)
-

 def serve(*services):
     FLAGS(sys.argv)
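With migrations in place, services no longer create tables implicitly at import time; the schema is now changed only by an explicit "nova-manage db sync". Roughly what the removed register_models() amounted to (a sketch, assuming BASE is the declarative_base() the models in nova/db/sqlalchemy/models.py inherit from):

    # sketch of the behavior this commit removes from service startup
    from sqlalchemy import create_engine

    def register_models():
        # connect with the configured URL and create any missing tables
        engine = create_engine(FLAGS.sql_connection, echo=False)
        BASE.metadata.create_all(engine)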
nova/tests/__init__.py
@@ -34,3 +34,8 @@
 # The code below enables nosetests to work with i18n _() blocks
 import __builtin__
 setattr(__builtin__, '_', lambda x: x)
+
+
+def setup():
+    from nova.db import migration
+    migration.db_sync()
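The test package now syncs the schema up front: nose treats a setup() function in a package's __init__.py as a package-level fixture, so migration.db_sync() runs once before any test in the package. The fixture semantics in miniature (standalone illustration, not Nova code):

    # put in tests/__init__.py: nose calls setup() exactly once,
    # before any test module in the package runs
    calls = []

    def setup():
        calls.append('ran')  # here Nova calls migration.db_sync()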
nova/tests/fake_flags.py
@@ -40,3 +40,4 @@ FLAGS.blades_per_shelf = 4
 FLAGS.iscsi_num_targets = 8
 FLAGS.verbose = True
+FLAGS.sql_connection = 'sqlite:///nova.sqlite'
 FLAGS.use_ipv6 = True
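'sqlite:///nova.sqlite' is a relative SQLAlchemy URL: three slashes with no host means a nova.sqlite file in the current working directory, which the setup() hook above populates via db_sync(). A quick check of how SQLAlchemy reads it:

    from sqlalchemy import create_engine

    engine = create_engine('sqlite:///nova.sqlite')  # file in the CWD
    # 'sqlite:////tmp/nova.sqlite' (four slashes) would be an absolute path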
nova/tests/test_virt.py
@@ -122,10 +122,10 @@ class LibvirtConnTestCase(test.TestCase):

         if rescue:
             check = (lambda t: t.find('./os/kernel').text.split('/')[1],
-                     'rescue-kernel')
+                     'kernel.rescue')
             check_list.append(check)
             check = (lambda t: t.find('./os/initrd').text.split('/')[1],
-                     'rescue-ramdisk')
+                     'ramdisk.rescue')
             check_list.append(check)
         else:
             if expect_kernel:
@@ -161,13 +161,16 @@ class LibvirtConnTestCase(test.TestCase):
         if rescue:
             common_checks += [
                 (lambda t: t.findall('./devices/disk/source')[0].get(
-                    'file').split('/')[1], 'rescue-disk'),
+                    'file').split('/')[1], 'disk.rescue'),
                 (lambda t: t.findall('./devices/disk/source')[1].get(
                     'file').split('/')[1], 'disk')]
         else:
             common_checks += [(lambda t: t.findall(
                 './devices/disk/source')[0].get('file').split('/')[1],
                 'disk')]
+            common_checks += [(lambda t: t.findall(
+                './devices/disk/source')[1].get('file').split('/')[1],
+                'disk.local')]

         for (libvirt_type, (expected_uri, checks)) in type_uri_map.iteritems():
             FLAGS.libvirt_type = libvirt_type
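The rescue artifacts are renamed from 'rescue-kernel'/'rescue-ramdisk'/'rescue-disk' to 'kernel.rescue'/'ramdisk.rescue'/'disk.rescue', and the non-rescue branch now also verifies the second ephemeral disk, 'disk.local'. Each check tuple pairs an XPath lookup on the generated domain XML with an expected path component; the mechanics in miniature (made-up XML for illustration):

    from xml.etree import ElementTree

    xml = '<domain><os><kernel>instance-1/kernel.rescue</kernel></os></domain>'
    tree = ElementTree.fromstring(xml)
    # the tests split the relative path on '/' and compare one component
    assert tree.find('./os/kernel').text.split('/')[1] == 'kernel.rescue'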
@@ -225,12 +228,6 @@ class IptablesFirewallTestCase(test.TestCase):
         self.manager.delete_user(self.user)
         super(IptablesFirewallTestCase, self).tearDown()

-    def _p(self, *args, **kwargs):
-        if 'iptables-restore' in args:
-            print ' '.join(args), kwargs['stdin']
-        if 'iptables-save' in args:
-            return
-
     in_rules = [
         '# Generated by iptables-save v1.4.4 on Mon Dec 6 11:54:13 2010',
         '*filter',
@@ -252,11 +249,21 @@ class IptablesFirewallTestCase(test.TestCase):
         '# Completed on Mon Dec 6 11:54:13 2010',
     ]

+    in6_rules = [
+        '# Generated by ip6tables-save v1.4.4 on Tue Jan 18 23:47:56 2011',
+        '*filter',
+        ':INPUT ACCEPT [349155:75810423]',
+        ':FORWARD ACCEPT [0:0]',
+        ':OUTPUT ACCEPT [349256:75777230]',
+        'COMMIT',
+        '# Completed on Tue Jan 18 23:47:56 2011'
+    ]
+
     def test_static_filters(self):
-        self.fw.execute = self._p
         instance_ref = db.instance_create(self.context,
                                           {'user_id': 'fake',
-                                           'project_id': 'fake'})
+                                           'project_id': 'fake',
+                                           'mac_address': '56:12:12:12:12:12'})
         ip = '10.11.12.13'

         network_ref = db.project_get_network(self.context,
@@ -301,18 +308,31 @@ class IptablesFirewallTestCase(test.TestCase):
                                secgroup['id'])
         instance_ref = db.instance_get(admin_ctxt, instance_ref['id'])

-        self.fw.add_instance(instance_ref)
+        # self.fw.add_instance(instance_ref)
+        def fake_iptables_execute(cmd, process_input=None):
+            if cmd == 'sudo ip6tables-save -t filter':
+                return '\n'.join(self.in6_rules), None
+            if cmd == 'sudo iptables-save -t filter':
+                return '\n'.join(self.in_rules), None
+            if cmd == 'sudo iptables-restore':
+                self.out_rules = process_input.split('\n')
+                return '', ''
+            if cmd == 'sudo ip6tables-restore':
+                self.out6_rules = process_input.split('\n')
+                return '', ''
+        self.fw.execute = fake_iptables_execute

-        out_rules = self.fw.modify_rules(self.in_rules)
+        self.fw.prepare_instance_filter(instance_ref)
+        self.fw.apply_instance_filter(instance_ref)

         in_rules = filter(lambda l: not l.startswith('#'), self.in_rules)
         for rule in in_rules:
             if not 'nova' in rule:
-                self.assertTrue(rule in out_rules,
+                self.assertTrue(rule in self.out_rules,
                                 'Rule went missing: %s' % rule)

         instance_chain = None
-        for rule in out_rules:
+        for rule in self.out_rules:
             # This is pretty crude, but it'll do for now
             if '-d 10.11.12.13 -j' in rule:
                 instance_chain = rule.split(' ')[-1]
@@ -320,7 +340,7 @@ class IptablesFirewallTestCase(test.TestCase):
         self.assertTrue(instance_chain, "The instance chain wasn't added")

         security_group_chain = None
-        for rule in out_rules:
+        for rule in self.out_rules:
             # This is pretty crude, but it'll do for now
             if '-A %s -j' % instance_chain in rule:
                 security_group_chain = rule.split(' ')[-1]
@@ -329,16 +349,16 @@ class IptablesFirewallTestCase(test.TestCase):
                         "The security group chain wasn't added")

         self.assertTrue('-A %s -p icmp -s 192.168.11.0/24 -j ACCEPT' % \
-                        security_group_chain in out_rules,
+                        security_group_chain in self.out_rules,
                         "ICMP acceptance rule wasn't added")

         self.assertTrue('-A %s -p icmp -s 192.168.11.0/24 -m icmp --icmp-type '
-                        ' 8 -j ACCEPT' % security_group_chain in out_rules,
+                        '8 -j ACCEPT' % security_group_chain in self.out_rules,
                         "ICMP Echo Request acceptance rule wasn't added")

         self.assertTrue('-A %s -p tcp -s 192.168.10.0/24 -m multiport '
                         '--dports 80:81 -j ACCEPT' % security_group_chain \
-                        in out_rules,
+                        in self.out_rules,
                         "TCP port 80/81 acceptance rule wasn't added")
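Instead of printing commands through _p and calling modify_rules directly, the test now substitutes the driver's execute callable, so prepare_instance_filter/apply_instance_filter run the full iptables-save, modify, iptables-restore round trip against the canned in_rules/in6_rules, and whatever would have been fed to iptables-restore is captured in self.out_rules/self.out6_rules. The interception pattern in miniature (standalone sketch, names invented):

    captured = {}

    def fake_execute(cmd, process_input=None):
        # stand in for both iptables-save and iptables-restore
        if cmd == 'sudo iptables-save -t filter':
            return '*filter\nCOMMIT', None
        if cmd == 'sudo iptables-restore':
            captured['rules'] = process_input.split('\n')
            return '', ''

    saved, _ = fake_execute('sudo iptables-save -t filter')
    fake_execute('sudo iptables-restore',
                 process_input=saved + '\n-A nova-test -j ACCEPT')
    assert '-A nova-test -j ACCEPT' in captured['rules']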
@@ -473,5 +493,6 @@ class NWFilterTestCase(test.TestCase):

         self.fw.setup_basic_filtering(instance)
         self.fw.prepare_instance_filter(instance)
+        self.fw.apply_instance_filter(instance)
         _ensure_all_called()
         self.teardown_security_group()
nova/tests/test_xenapi.py
@@ -34,6 +34,7 @@ from nova.virt.xenapi import volume_utils
 from nova.virt.xenapi.vmops import SimpleDH
+from nova.tests.db import fakes as db_fakes
 from nova.tests.xenapi import stubs
 from nova.tests.glance import stubs as glance_stubs

 FLAGS = flags.FLAGS

@@ -108,18 +109,16 @@ class XenAPIVolumeTestCase(test.TestCase):
         conn = xenapi_conn.get_connection(False)
         volume = self._create_volume()
         instance = db.instance_create(self.values)
-        xenapi_fake.create_vm(instance.name, 'Running')
+        vm = xenapi_fake.create_vm(instance.name, 'Running')
         result = conn.attach_volume(instance.name, volume['id'], '/dev/sdc')

         def check():
             # check that the VM has a VBD attached to it
-            # Get XenAPI reference for the VM
-            vms = xenapi_fake.get_all('VM')
             # Get XenAPI record for VBD
             vbds = xenapi_fake.get_all('VBD')
             vbd = xenapi_fake.get_record('VBD', vbds[0])
             vm_ref = vbd['VM']
-            self.assertEqual(vm_ref, vms[0])
+            self.assertEqual(vm_ref, vm)

         check()
@@ -157,9 +156,14 @@ class XenAPIVMTestCase(test.TestCase):
         FLAGS.xenapi_connection_url = 'test_url'
         FLAGS.xenapi_connection_password = 'test_pass'
         xenapi_fake.reset()
+        xenapi_fake.create_local_srs()
+        db_fakes.stub_out_db_instance_api(self.stubs)
         xenapi_fake.create_network('fake', FLAGS.flat_network_bridge)
         stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
+        stubs.stubout_get_this_vm_uuid(self.stubs)
+        stubs.stubout_stream_disk(self.stubs)
         glance_stubs.stubout_glance_client(self.stubs,
                                            glance_stubs.FakeGlance)
+        self.conn = xenapi_conn.get_connection(False)

     def test_list_instances_0(self):
@@ -207,19 +211,18 @@ class XenAPIVMTestCase(test.TestCase):

         check()

-    def test_spawn(self):
-        instance = self._create_instance()
-
-        def check():
-            instances = self.conn.list_instances()
+    def check_vm_record(self, conn):
+        instances = conn.list_instances()
         self.assertEquals(instances, [1])

         # Get Nova record for VM
-        vm_info = self.conn.get_info(1)
+        vm_info = conn.get_info(1)

         # Get XenAPI record for VM
-        vms = xenapi_fake.get_all('VM')
-        vm = xenapi_fake.get_record('VM', vms[0])
+        vms = [rec for ref, rec
+               in xenapi_fake.get_all_records('VM').iteritems()
+               if not rec['is_control_domain']]
+        vm = vms[0]

         # Check that m1.large above turned into the right thing.
         instance_type = instance_types.INSTANCE_TYPES['m1.large']
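The fake session now contains a control-domain record (a dom0 stand-in) alongside any guest VMs, so the test selects guests by filtering on is_control_domain instead of assuming the only VM record is the one it spawned. The filter in miniature (made-up records for illustration):

    records = {
        'OpaqueRef:dom0': {'is_control_domain': True, 'uuid': 'uuid-dom0'},
        'OpaqueRef:vm1': {'is_control_domain': False, 'uuid': 'uuid-vm1'},
    }
    vms = [rec for ref, rec in records.iteritems()
           if not rec['is_control_domain']]
    assert [rec['uuid'] for rec in vms] == ['uuid-vm1']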
@@ -240,7 +243,38 @@ class XenAPIVMTestCase(test.TestCase):
         # Check that the VM is running according to XenAPI.
         self.assertEquals(vm['power_state'], 'Running')

-        check()
+    def _test_spawn(self, image_id, kernel_id, ramdisk_id):
+        stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
+        values = {'name': 1,
+                  'id': 1,
+                  'project_id': self.project.id,
+                  'user_id': self.user.id,
+                  'image_id': image_id,
+                  'kernel_id': kernel_id,
+                  'ramdisk_id': ramdisk_id,
+                  'instance_type': 'm1.large',
+                  'mac_address': 'aa:bb:cc:dd:ee:ff',
+                  }
+        conn = xenapi_conn.get_connection(False)
+        instance = db.instance_create(values)
+        conn.spawn(instance)
+        self.check_vm_record(conn)
+
+    def test_spawn_raw_objectstore(self):
+        FLAGS.xenapi_image_service = 'objectstore'
+        self._test_spawn(1, None, None)
+
+    def test_spawn_objectstore(self):
+        FLAGS.xenapi_image_service = 'objectstore'
+        self._test_spawn(1, 2, 3)
+
+    def test_spawn_raw_glance(self):
+        FLAGS.xenapi_image_service = 'glance'
+        self._test_spawn(1, None, None)
+
+    def test_spawn_glance(self):
+        FLAGS.xenapi_image_service = 'glance'
+        self._test_spawn(1, 2, 3)

     def tearDown(self):
         super(XenAPIVMTestCase, self).tearDown()
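The single test_spawn becomes a parameterized helper plus four tests covering the matrix of image service (objectstore vs glance) by image layout (a raw image with kernel_id/ramdisk_id of None, vs an image with a separate kernel and ramdisk, ids 1/2/3 here). The same coverage, table-driven (standalone sketch with _test_spawn stubbed out):

    def _test_spawn(image_id, kernel_id, ramdisk_id, image_service):
        print image_service, image_id, kernel_id, ramdisk_id

    for image_service in ('objectstore', 'glance'):
        for image_id, kernel_id, ramdisk_id in ((1, None, None), (1, 2, 3)):
            _test_spawn(image_id, kernel_id, ramdisk_id, image_service)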
nova/tests/xenapi/stubs.py
@@ -115,6 +115,21 @@ def stub_out_get_target(stubs):
     stubs.Set(volume_utils, '_get_target', fake_get_target)


+def stubout_get_this_vm_uuid(stubs):
+    def f():
+        vms = [rec['uuid'] for ref, rec
+               in fake.get_all_records('VM').iteritems()
+               if rec['is_control_domain']]
+        return vms[0]
+    stubs.Set(vm_utils, 'get_this_vm_uuid', f)
+
+
+def stubout_stream_disk(stubs):
+    def f(_1, _2, _3, _4):
+        pass
+    stubs.Set(vm_utils, '_stream_disk', f)
+
+
 class FakeSessionForVMTests(fake.SessionBase):
     """ Stubs out a XenAPISession for VM tests """
     def __init__(self, uri):
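stubout_get_this_vm_uuid returns the fake control domain's uuid (on a real dom0, get_this_vm_uuid reads it from the host), and stubout_stream_disk turns the actual disk write into a no-op. The stubs argument is a stubout.StubOutForTesting instance from the mox package used across these tests: Set replaces an attribute until UnsetAll restores it. That mechanism in miniature (assuming mox is installed):

    from mox import stubout

    class Greeter(object):
        def greet(self):
            return 'hello'

    stubs = stubout.StubOutForTesting()
    stubs.Set(Greeter, 'greet', lambda self: 'stubbed')
    assert Greeter().greet() == 'stubbed'
    stubs.UnsetAll()  # puts the original method back
    assert Greeter().greet() == 'hello'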
@@ -124,7 +139,10 @@ class FakeSessionForVMTests(fake.SessionBase):
         return self.xenapi.network.get_all_records()

     def host_call_plugin(self, _1, _2, _3, _4, _5):
-        return ''
+        sr_ref = fake.get_all('SR')[0]
+        vdi_ref = fake.create_vdi('', False, sr_ref, False)
+        vdi_rec = fake.get_record('VDI', vdi_ref)
+        return '<string>%s</string>' % vdi_rec['uuid']

     def VM_start(self, _1, ref, _2, _3):
         vm = fake.get_record('VM', ref)
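The fake plugin call no longer returns an empty string: it fabricates a VDI in the fake SR and hands back its uuid wrapped in a <string> element, matching the shape of a real host plugin response, so the spawn path can locate the "downloaded" image. One plausible way a caller could strip that envelope (illustrative only, not the driver's actual parsing code):

    import re

    response = '<string>fa81f5be-1234-4f7e-a51a-123456789012</string>'
    vdi_uuid = re.sub(r'</?string>', '', response)
    assert vdi_uuid == 'fa81f5be-1234-4f7e-a51a-123456789012'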
@@ -159,10 +177,6 @@ class FakeSessionForVolumeTests(fake.SessionBase):
     def __init__(self, uri):
         super(FakeSessionForVolumeTests, self).__init__(uri)

-    def VBD_plug(self, _1, ref):
-        rec = fake.get_record('VBD', ref)
-        rec['currently-attached'] = True
-
     def VDI_introduce(self, _1, uuid, _2, _3, _4, _5,
                       _6, _7, _8, _9, _10, _11):
         valid_vdi = False
@@ -17,7 +17,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.


 import gettext
 import os
-import unittest
+import sys