Access DB values as dicts, not as attributes. Part 3
We cannot assume that nova.db.api will keep returning SQLAlchemy objects whose values are readable as attributes; instead, treat return values as dicts.

Part of blueprint db-api-cleanup

Change-Id: Icf45e5475b9d626f7e07efe953615dd82bb69ade
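For context, a minimal sketch of the access pattern this series enforces (illustrative only; FakeModel and vm_name are hypothetical helpers, not part of the commit): dict-style subscription works both on a plain dict and on a model-like object that implements __getitem__, whereas attribute access such as instance.name only works on the latter.

class FakeModel(object):
    """Stands in for a DB-layer object that also supports item access."""

    def __init__(self, values):
        self._values = values

    def __getitem__(self, key):
        return self._values[key]


def vm_name(instance):
    # Dict-style access is the common denominator: it works whether the
    # DB layer hands back a plain dict or a model-like object, while
    # instance.name would only work on the object.
    return instance['name']


print(vm_name({'name': 'instance-0001'}))             # plain dict
print(vm_name(FakeModel({'name': 'instance-0001'})))  # model-like object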
@@ -242,9 +242,9 @@ class XenAPIVolumeTestCase(stubs.XenAPITestBase):
 stubs.stubout_session(self.stubs, stubs.FakeSessionForVolumeTests)
 conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
 instance = db.instance_create(self.context, self.instance_values)
-vm = xenapi_fake.create_vm(instance.name, 'Running')
+vm = xenapi_fake.create_vm(instance['name'], 'Running')
 result = conn.attach_volume(self._make_connection_info(),
-instance.name, '/dev/sdc')
+instance['name'], '/dev/sdc')
 
 # check that the VM has a VBD attached to it
 # Get XenAPI record for VBD
@@ -259,11 +259,11 @@ class XenAPIVolumeTestCase(stubs.XenAPITestBase):
 stubs.FakeSessionForVolumeFailedTests)
 conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
 instance = db.instance_create(self.context, self.instance_values)
-xenapi_fake.create_vm(instance.name, 'Running')
+xenapi_fake.create_vm(instance['name'], 'Running')
 self.assertRaises(exception.VolumeDriverNotFound,
 conn.attach_volume,
 {'driver_volume_type': 'nonexist'},
-instance.name,
+instance['name'],
 '/dev/sdc')
 
 
@@ -409,7 +409,7 @@ class XenAPIVMTestCase(stubs.XenAPITestBase):
 if not vm_rec["is_control_domain"]:
 vm_labels.append(vm_rec["name_label"])
 
-self.assertEquals(vm_labels, [instance.name])
+self.assertEquals(vm_labels, [instance['name']])
 
 # Ensure VBDs were torn down
 vbd_labels = []
@@ -417,7 +417,7 @@ class XenAPIVMTestCase(stubs.XenAPITestBase):
 vbd_rec = xenapi_fake.get_record('VBD', vbd_ref)
 vbd_labels.append(vbd_rec["vm_name_label"])
 
-self.assertEquals(vbd_labels, [instance.name])
+self.assertEquals(vbd_labels, [instance['name']])
 
 # Ensure VDIs were torn down
 for vdi_ref in xenapi_fake.get_all('VDI'):
@@ -587,8 +587,8 @@ class XenAPIVMTestCase(stubs.XenAPITestBase):
 'herp', network_info)
 self.create_vm_record(self.conn, os_type, instance['name'])
 self.check_vm_record(self.conn, check_injection)
-self.assertTrue(instance.os_type)
-self.assertTrue(instance.architecture)
+self.assertTrue(instance['os_type'])
+self.assertTrue(instance['architecture'])
 
 def test_spawn_empty_dns(self):
 """Test spawning with an empty dns list"""
@@ -825,7 +825,7 @@ class XenAPIVMTestCase(stubs.XenAPITestBase):
 instance = self._create_instance()
 session = xenapi_conn.XenAPISession('test_url', 'root', 'test_pass',
 fake.FakeVirtAPI())
-vm_ref = vm_utils.lookup(session, instance.name)
+vm_ref = vm_utils.lookup(session, instance['name'])
 
 swap_vdi_ref = xenapi_fake.create_vdi('swap', None)
 root_vdi_ref = xenapi_fake.create_vdi('root', None)
@@ -853,7 +853,8 @@ class XenAPIVMTestCase(stubs.XenAPITestBase):
 conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
 # Unrescue expects the original instance to be powered off
 conn.power_off(instance)
-rescue_vm = xenapi_fake.create_vm(instance.name + '-rescue', 'Running')
+rescue_vm = xenapi_fake.create_vm(instance['name'] + '-rescue',
+'Running')
 conn.unrescue(instance, None)
 
 def test_unrescue_not_in_rescue(self):
@@ -894,16 +895,16 @@ class XenAPIVMTestCase(stubs.XenAPITestBase):
 fake.FakeVirtAPI())
 instance = self._create_instance(spawn=False)
 conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
-xenapi_fake.create_vm(instance.name, 'Halted')
+xenapi_fake.create_vm(instance['name'], 'Halted')
 conn.reboot(instance, None, "SOFT")
-vm_ref = vm_utils.lookup(session, instance.name)
+vm_ref = vm_utils.lookup(session, instance['name'])
 vm = xenapi_fake.get_record('VM', vm_ref)
 self.assertEquals(vm['power_state'], 'Running')
 
 def test_reboot_unknown_state(self):
 instance = self._create_instance(spawn=False)
 conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
-xenapi_fake.create_vm(instance.name, 'Unknown')
+xenapi_fake.create_vm(instance['name'], 'Unknown')
 self.assertRaises(xenapi_fake.Failure, conn.reboot, instance,
 None, "SOFT")
 
@@ -1128,7 +1129,7 @@ class XenAPIMigrateInstance(stubs.XenAPITestBase):
 
 def test_migrate_disk_and_power_off(self):
 instance = db.instance_create(self.context, self.instance_values)
-xenapi_fake.create_vm(instance.name, 'Running')
+xenapi_fake.create_vm(instance['name'], 'Running')
 instance_type = db.instance_type_get_by_name(self.context, 'm1.large')
 conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
 conn.migrate_disk_and_power_off(self.context, instance,
@@ -1136,7 +1137,7 @@ class XenAPIMigrateInstance(stubs.XenAPITestBase):
 
 def test_migrate_disk_and_power_off_passes_exceptions(self):
 instance = db.instance_create(self.context, self.instance_values)
-xenapi_fake.create_vm(instance.name, 'Running')
+xenapi_fake.create_vm(instance['name'], 'Running')
 instance_type = db.instance_type_get_by_name(self.context, 'm1.large')
 
 def fake_raise(*args, **kwargs):
@@ -1176,7 +1177,7 @@ class XenAPIMigrateInstance(stubs.XenAPITestBase):
 conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
 network_info = fake_network.fake_get_instance_nw_info(self.stubs,
 spectacular=True)
-image_meta = {'id': instance.image_ref, 'disk_format': 'vhd'}
+image_meta = {'id': instance['image_ref'], 'disk_format': 'vhd'}
 base = xenapi_fake.create_vdi('hurr', 'fake')
 base_uuid = xenapi_fake.get_record('VDI', base)['uuid']
 cow = xenapi_fake.create_vdi('durr', 'fake')
@@ -1211,7 +1212,7 @@ class XenAPIMigrateInstance(stubs.XenAPITestBase):
 conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
 network_info = fake_network.fake_get_instance_nw_info(self.stubs,
 spectacular=True)
-image_meta = {'id': instance.image_ref, 'disk_format': 'vhd'}
+image_meta = {'id': instance['image_ref'], 'disk_format': 'vhd'}
 conn.finish_migration(self.context, self.migration, instance,
 dict(base_copy='hurr', cow='durr'),
 network_info, image_meta, resize_instance=True)
@@ -1233,7 +1234,7 @@ class XenAPIMigrateInstance(stubs.XenAPITestBase):
 conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
 network_info = fake_network.fake_get_instance_nw_info(self.stubs,
 spectacular=True)
-image_meta = {'id': instance.image_ref, 'disk_format': 'vhd'}
+image_meta = {'id': instance['image_ref'], 'disk_format': 'vhd'}
 conn.finish_migration(self.context, self.migration, instance,
 dict(base_copy='hurr', cow='durr'),
 network_info, image_meta, resize_instance=True)
@@ -1250,7 +1251,7 @@ class XenAPIMigrateInstance(stubs.XenAPITestBase):
 network_info = fake_network.fake_get_instance_nw_info(self.stubs,
 spectacular=True)
 # Resize instance would be determined by the compute call
-image_meta = {'id': instance.image_ref, 'disk_format': 'vhd'}
+image_meta = {'id': instance['image_ref'], 'disk_format': 'vhd'}
 conn.finish_migration(self.context, self.migration, instance,
 dict(base_copy='hurr', cow='durr'),
 network_info, image_meta, resize_instance=False)
@@ -1261,7 +1262,7 @@ class XenAPIMigrateInstance(stubs.XenAPITestBase):
 instance_values['root_gb'] = 40
 instance_values['auto_disk_config'] = False
 instance = db.instance_create(self.context, instance_values)
-xenapi_fake.create_vm(instance.name, 'Running')
+xenapi_fake.create_vm(instance['name'], 'Running')
 instance_type = db.instance_type_get_by_name(self.context, 'm1.small')
 conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
 self.assertRaises(exception.ResizeError,
@@ -2159,10 +2160,10 @@ class XenAPIAggregateTestCase(stubs.XenAPITestBase):
 
 aggregate = self._aggregate_setup()
 self.conn._pool.add_to_aggregate(self.context, aggregate, "host")
-result = db.aggregate_get(self.context, aggregate.id)
+result = db.aggregate_get(self.context, aggregate['id'])
 self.assertTrue(fake_init_pool.called)
 self.assertThat(self.fake_metadata,
-matchers.DictMatches(result.metadetails))
+matchers.DictMatches(result['metadetails']))
 
 def test_join_slave(self):
 """Ensure join_slave gets called when the request gets to master."""
@@ -2192,12 +2193,12 @@ class XenAPIAggregateTestCase(stubs.XenAPITestBase):
 result = db.aggregate_create(self.context, values)
 metadata = {pool_states.POOL_FLAG: "XenAPI",
 pool_states.KEY: pool_states.CREATED}
-db.aggregate_metadata_add(self.context, result.id, metadata)
+db.aggregate_metadata_add(self.context, result['id'], metadata)
 
-db.aggregate_host_add(self.context, result.id, "host")
-aggregate = db.aggregate_get(self.context, result.id)
-self.assertEqual(["host"], aggregate.hosts)
-self.assertEqual(metadata, aggregate.metadetails)
+db.aggregate_host_add(self.context, result['id'], "host")
+aggregate = db.aggregate_get(self.context, result['id'])
+self.assertEqual(["host"], aggregate['hosts'])
+self.assertEqual(metadata, aggregate['metadetails'])
 
 self.conn._pool.add_to_aggregate(self.context, aggregate, "host")
 self.assertTrue(fake_pool_set_name_label.called)
@@ -2238,11 +2239,11 @@ class XenAPIAggregateTestCase(stubs.XenAPITestBase):
 
 aggregate = self._aggregate_setup(metadata=self.fake_metadata)
 self.conn._pool.remove_from_aggregate(self.context, aggregate, "host")
-result = db.aggregate_get(self.context, aggregate.id)
+result = db.aggregate_get(self.context, aggregate['id'])
 self.assertTrue(fake_clear_pool.called)
 self.assertThat({pool_states.POOL_FLAG: 'XenAPI',
 pool_states.KEY: pool_states.ACTIVE},
-matchers.DictMatches(result.metadetails))
+matchers.DictMatches(result['metadetails']))
 
 def test_remote_master_non_empty_pool(self):
 """Ensure AggregateError is raised if removing the master."""
@@ -2262,13 +2263,13 @@ class XenAPIAggregateTestCase(stubs.XenAPITestBase):
 result = db.aggregate_create(self.context, values)
 pool_flag = {pool_states.POOL_FLAG: "XenAPI",
 pool_states.KEY: aggr_state}
-db.aggregate_metadata_add(self.context, result.id, pool_flag)
+db.aggregate_metadata_add(self.context, result['id'], pool_flag)
 
 for host in hosts:
-db.aggregate_host_add(self.context, result.id, host)
+db.aggregate_host_add(self.context, result['id'], host)
 if metadata:
-db.aggregate_metadata_add(self.context, result.id, metadata)
-return db.aggregate_get(self.context, result.id)
+db.aggregate_metadata_add(self.context, result['id'], metadata)
+return db.aggregate_get(self.context, result['id'])
 
 def test_add_host_to_aggregate_invalid_changing_status(self):
 """Ensure InvalidAggregateAction is raised when adding host while
@@ -2341,23 +2342,17 @@ class XenAPIAggregateTestCase(stubs.XenAPITestBase):
 fake_driver_add_to_aggregate)
 metadata = {pool_states.POOL_FLAG: "XenAPI",
 pool_states.KEY: pool_states.ACTIVE}
-db.aggregate_metadata_add(self.context, self.aggr.id, metadata)
-db.aggregate_host_add(self.context, self.aggr.id, 'fake_host')
+db.aggregate_metadata_add(self.context, self.aggr['id'], metadata)
+db.aggregate_host_add(self.context, self.aggr['id'], 'fake_host')
 
 self.assertRaises(exception.AggregateError,
 self.compute.add_aggregate_host,
 self.context, "fake_host",
 aggregate=jsonutils.to_primitive(self.aggr))
-excepted = db.aggregate_get(self.context, self.aggr.id)
-self.assertEqual(excepted.metadetails[pool_states.KEY],
+excepted = db.aggregate_get(self.context, self.aggr['id'])
+self.assertEqual(excepted['metadetails'][pool_states.KEY],
 pool_states.ERROR)
-self.assertEqual(excepted.hosts, [])
+self.assertEqual(excepted['hosts'], [])
 
 
-class Aggregate(object):
-def __init__(self, id=None, hosts=None):
-self.id = id
-self.hosts = hosts or []
-
-
 class MockComputeAPI(object):
@@ -2404,7 +2399,7 @@ class HypervisorPoolTestCase(test.TestCase):
 
 def test_slave_asks_master_to_add_slave_to_pool(self):
 slave = ResourcePoolWithStubs()
-aggregate = Aggregate(id=98, hosts=[])
+aggregate = {'id': 98, 'hosts': []}
 
 slave.add_to_aggregate("CONTEXT", aggregate, "slave")
 
@@ -2416,7 +2411,7 @@ class HypervisorPoolTestCase(test.TestCase):
 
 def test_slave_asks_master_to_remove_slave_from_pool(self):
 slave = ResourcePoolWithStubs()
-aggregate = Aggregate(id=98, hosts=[])
+aggregate = {'id': 98, 'hosts': []}
 
 slave.remove_from_aggregate("CONTEXT", aggregate, "slave")
 
@@ -297,8 +297,8 @@ class IptablesFirewallDriver(FirewallDriver):
 '-s %s/128 -p icmpv6 -j ACCEPT' % (gateway_v6,))
 
 def _build_icmp_rule(self, rule, version):
-icmp_type = rule.from_port
-icmp_code = rule.to_port
+icmp_type = rule['from_port']
+icmp_code = rule['to_port']
 
 if icmp_type == -1:
 icmp_type_arg = None
@@ -55,12 +55,12 @@ class Dom0IptablesFirewallDriver(firewall.IptablesFirewallDriver):
 self.iptables.ipv6['filter'].add_rule('sg-fallback', '-j DROP')
 
 def _build_tcp_udp_rule(self, rule, version):
-if rule.from_port == rule.to_port:
-return ['--dport', '%s' % (rule.from_port,)]
+if rule['from_port'] == rule['to_port']:
+return ['--dport', '%s' % (rule['from_port'],)]
 else:
 # No multiport needed for XS!
-return ['--dport', '%s:%s' % (rule.from_port,
-rule.to_port)]
+return ['--dport', '%s:%s' % (rule['from_port'],
+rule['to_port'])]
 
 def _provider_rules(self):
 """Generate a list of rules from provider for IP4 & IP6.
@@ -80,51 +80,51 @@ class ResourcePool(object):
 
 def add_to_aggregate(self, context, aggregate, host, slave_info=None):
 """Add a compute host to an aggregate."""
-if not self._is_hv_pool(context, aggregate.id):
+if not self._is_hv_pool(context, aggregate['id']):
 return
 
 invalid = {pool_states.CHANGING: 'setup in progress',
 pool_states.DISMISSED: 'aggregate deleted',
 pool_states.ERROR: 'aggregate in error'}
 
-if (self._get_metadata(context, aggregate.id)[pool_states.KEY]
+if (self._get_metadata(context, aggregate['id'])[pool_states.KEY]
 in invalid.keys()):
 raise exception.InvalidAggregateAction(
 action='add host',
-aggregate_id=aggregate.id,
+aggregate_id=aggregate['id'],
 reason=invalid[self._get_metadata(context,
-aggregate.id)
+aggregate['id'])
 [pool_states.KEY]])
 
-if (self._get_metadata(context, aggregate.id)[pool_states.KEY]
+if (self._get_metadata(context, aggregate['id'])[pool_states.KEY]
 == pool_states.CREATED):
-self._virtapi.aggregate_metadata_add(context, aggregate.id,
+self._virtapi.aggregate_metadata_add(context, aggregate['id'],
 {pool_states.KEY:
 pool_states.CHANGING})
-if len(aggregate.hosts) == 1:
+if len(aggregate['hosts']) == 1:
 # this is the first host of the pool -> make it master
-self._init_pool(aggregate.id, aggregate.name)
+self._init_pool(aggregate['id'], aggregate['name'])
 # save metadata so that we can find the master again
 metadata = {'master_compute': host,
 host: self._host_uuid,
 pool_states.KEY: pool_states.ACTIVE}
-self._virtapi.aggregate_metadata_add(context, aggregate.id,
+self._virtapi.aggregate_metadata_add(context, aggregate['id'],
 metadata)
 else:
 # the pool is already up and running, we need to figure out
 # whether we can serve the request from this host or not.
 master_compute = self._get_metadata(context,
-aggregate.id)['master_compute']
+aggregate['id'])['master_compute']
 if master_compute == CONF.host and master_compute != host:
 # this is the master -> do a pool-join
 # To this aim, nova compute on the slave has to go down.
 # NOTE: it is assumed that ONLY nova compute is running now
-self._join_slave(aggregate.id, host,
+self._join_slave(aggregate['id'], host,
 slave_info.get('compute_uuid'),
 slave_info.get('url'), slave_info.get('user'),
 slave_info.get('passwd'))
 metadata = {host: slave_info.get('xenhost_uuid'), }
-self._virtapi.aggregate_metadata_add(context, aggregate.id,
+self._virtapi.aggregate_metadata_add(context, aggregate['id'],
 metadata)
 elif master_compute and master_compute != host:
 # send rpc cast to master, asking to add the following
@@ -137,55 +137,55 @@ class ResourcePool(object):
 def remove_from_aggregate(self, context, aggregate, host, slave_info=None):
 """Remove a compute host from an aggregate."""
 slave_info = slave_info or dict()
-if not self._is_hv_pool(context, aggregate.id):
+if not self._is_hv_pool(context, aggregate['id']):
 return
 
 invalid = {pool_states.CREATED: 'no hosts to remove',
 pool_states.CHANGING: 'setup in progress',
 pool_states.DISMISSED: 'aggregate deleted', }
-if (self._get_metadata(context, aggregate.id)[pool_states.KEY]
+if (self._get_metadata(context, aggregate['id'])[pool_states.KEY]
 in invalid.keys()):
 raise exception.InvalidAggregateAction(
 action='remove host',
-aggregate_id=aggregate.id,
+aggregate_id=aggregate['id'],
 reason=invalid[self._get_metadata(context,
-aggregate.id)[pool_states.KEY]])
+aggregate['id'])[pool_states.KEY]])
 
 master_compute = self._get_metadata(context,
-aggregate.id)['master_compute']
+aggregate['id'])['master_compute']
 if master_compute == CONF.host and master_compute != host:
 # this is the master -> instruct it to eject a host from the pool
-host_uuid = self._get_metadata(context, aggregate.id)[host]
-self._eject_slave(aggregate.id,
+host_uuid = self._get_metadata(context, aggregate['id'])[host]
+self._eject_slave(aggregate['id'],
 slave_info.get('compute_uuid'), host_uuid)
-self._virtapi.aggregate_metadata_delete(context, aggregate.id,
+self._virtapi.aggregate_metadata_delete(context, aggregate['id'],
 host)
 elif master_compute == host:
 # Remove master from its own pool -> destroy pool only if the
 # master is on its own, otherwise raise fault. Destroying a
 # pool made only by master is fictional
-if len(aggregate.hosts) > 1:
+if len(aggregate['hosts']) > 1:
 # NOTE: this could be avoided by doing a master
 # re-election, but this is simpler for now.
 raise exception.InvalidAggregateAction(
-aggregate_id=aggregate.id,
+aggregate_id=aggregate['id'],
 action='remove_from_aggregate',
 reason=_('Unable to eject %(host)s '
 'from the pool; pool not empty')
 % locals())
-self._clear_pool(aggregate.id)
+self._clear_pool(aggregate['id'])
 for key in ['master_compute', host]:
-self._virtapi.aggregate_metadata_delete(context, aggregate.id,
-key)
+self._virtapi.aggregate_metadata_delete(context,
+aggregate['id'], key)
 elif master_compute and master_compute != host:
 # A master exists -> forward pool-eject request to master
 slave_info = self._create_slave_info()
 
 self.compute_rpcapi.remove_aggregate_host(
-context, aggregate.id, host, master_compute, slave_info)
+context, aggregate['id'], host, master_compute, slave_info)
 else:
 # this shouldn't have happened
-raise exception.AggregateError(aggregate_id=aggregate.id,
+raise exception.AggregateError(aggregate_id=aggregate['id'],
 action='remove_from_aggregate',
 reason=_('Unable to eject %(host)s '
 'from the pool; No master found')