Enable flake8 F841 checking
This check looks to see whether a local variable is unused. Fixed all violators of this check. Change-Id: I6ff349ca8d650ae7d6ebeeb116a1649b0db8f071
This commit is contained in:
parent
5a7eb3aa68
commit
1fdc4afa15
|
@ -801,7 +801,7 @@ class CloudController(object):
|
|||
if volume.get('instance_uuid', None):
|
||||
instance_uuid = volume['instance_uuid']
|
||||
# Make sure instance exists
|
||||
instance = instance_obj.Instance.get_by_uuid(context.elevated(),
|
||||
instance_obj.Instance.get_by_uuid(context.elevated(),
|
||||
instance_uuid)
|
||||
|
||||
instance_ec2_id = ec2utils.id_to_ec2_inst_id(instance_uuid)
|
||||
|
|
|
@ -363,7 +363,7 @@ def validate_device_name(value):
|
|||
# is supported by nova.compute
|
||||
utils.check_string_length(value, 'Device name',
|
||||
min_length=1, max_length=255)
|
||||
except exception.InvalidInput as e:
|
||||
except exception.InvalidInput:
|
||||
raise exception.InvalidBDMFormat(
|
||||
details="Device name empty or too long.")
|
||||
|
||||
|
@ -377,7 +377,7 @@ def validate_and_default_volume_size(bdm):
|
|||
try:
|
||||
bdm['volume_size'] = utils.validate_integer(
|
||||
bdm['volume_size'], 'volume_size', min_value=0)
|
||||
except exception.InvalidInput as e:
|
||||
except exception.InvalidInput:
|
||||
raise exception.InvalidBDMFormat(
|
||||
details="Invalid volume_size.")
|
||||
|
||||
|
|
|
@ -223,7 +223,7 @@ def work_on_disk(dev, root_mb, swap_mb, ephemeral_mb, image_path,
|
|||
|
||||
try:
|
||||
root_uuid = block_uuid(root_part)
|
||||
except processutils.ProcessExecutionError as err:
|
||||
except processutils.ProcessExecutionError:
|
||||
with excutils.save_and_reraise_exception():
|
||||
LOG.error(_("Failed to detect root device UUID."))
|
||||
return root_uuid
|
||||
|
|
|
@ -689,7 +689,6 @@ class ComputeManager(manager.Manager):
|
|||
'host (%(our_host)s).'),
|
||||
{'instance_host': instance.host,
|
||||
'our_host': our_host}, instance=instance)
|
||||
destroy_disks = False
|
||||
try:
|
||||
network_info = self._get_instance_nw_info(context,
|
||||
instance)
|
||||
|
@ -2280,7 +2279,7 @@ class ComputeManager(manager.Manager):
|
|||
except exception.InstanceNotFound:
|
||||
LOG.info(_("Instance disappeared during terminate"),
|
||||
instance=instance)
|
||||
except Exception as error:
|
||||
except Exception:
|
||||
# As we're trying to delete always go to Error if something
|
||||
# goes wrong that _delete_instance can't handle.
|
||||
with excutils.save_and_reraise_exception():
|
||||
|
@ -5617,7 +5616,7 @@ class ComputeManager(manager.Manager):
|
|||
vm_state=vm_states.ACTIVE,
|
||||
task_state=None)
|
||||
raise error.inner_exception
|
||||
except Exception as error:
|
||||
except Exception:
|
||||
LOG.exception(_('Setting instance vm_state to ERROR'),
|
||||
instance_uuid=instance_uuid)
|
||||
with excutils.save_and_reraise_exception():
|
||||
|
|
|
@ -854,7 +854,7 @@ class ComputeTaskManager(base.Base):
|
|||
self.compute_rpcapi.unshelve_instance(
|
||||
context, instance, host, image=image,
|
||||
filter_properties=filter_properties, node=node)
|
||||
except exception.NoValidHost as ex:
|
||||
except exception.NoValidHost:
|
||||
instance.task_state = None
|
||||
instance.save()
|
||||
LOG.warning(_("No valid host found for unshelve instance"),
|
||||
|
|
|
@ -375,7 +375,6 @@ def sign_csr(csr_text, project_id=None):
|
|||
if not project_id:
|
||||
return _sign_csr(csr_text, ca_folder())
|
||||
_ensure_project_folder(project_id)
|
||||
project_folder = ca_folder(project_id)
|
||||
return _sign_csr(csr_text, ca_folder(project_id))
|
||||
|
||||
|
||||
|
|
|
@ -4235,8 +4235,6 @@ def flavor_get_all(context, inactive=False, filters=None,
|
|||
# database.
|
||||
read_deleted = "yes" if inactive else "no"
|
||||
|
||||
sort_fn = {'desc': desc, 'asc': asc}
|
||||
|
||||
query = _flavor_get_query(context, read_deleted=read_deleted)
|
||||
|
||||
if 'min_memory_mb' in filters:
|
||||
|
@ -5550,10 +5548,8 @@ def archive_deleted_rows_for_table(context, tablename, max_rows):
|
|||
# We have one table (dns_domains) where the key is called
|
||||
# "domain" rather than "id"
|
||||
column = table.c.domain
|
||||
column_name = "domain"
|
||||
else:
|
||||
column = table.c.id
|
||||
column_name = "id"
|
||||
# NOTE(guochbo): Use InsertFromSelect and DeleteFromSelect to avoid
|
||||
# database's limit of maximum parameter in one SQL statement.
|
||||
query_insert = select([table],
|
||||
|
@ -5569,7 +5565,7 @@ def archive_deleted_rows_for_table(context, tablename, max_rows):
|
|||
try:
|
||||
# Group the insert and delete in a transaction.
|
||||
with conn.begin():
|
||||
result_insert = conn.execute(insert_statement)
|
||||
conn.execute(insert_statement)
|
||||
result_delete = conn.execute(delete_statement)
|
||||
except IntegrityError:
|
||||
# A foreign key constraint keeps us from deleting some of
|
||||
|
|
|
@ -282,7 +282,7 @@ class GlanceImageService(object):
|
|||
return self._download_handlers[scheme]
|
||||
except KeyError:
|
||||
return None
|
||||
except Exception as ex:
|
||||
except Exception:
|
||||
LOG.error(_("Failed to instantiate the download handler "
|
||||
"for %(scheme)s") % {'scheme': scheme})
|
||||
return
|
||||
|
|
|
@ -111,7 +111,6 @@ class FloatingIP(object):
|
|||
if not uuidutils.is_uuid_like(instance_uuid):
|
||||
instance_uuid = kwargs.get('instance_uuid')
|
||||
project_id = kwargs.get('project_id')
|
||||
requested_networks = kwargs.get('requested_networks')
|
||||
# call the next inherited class's allocate_for_instance()
|
||||
# which is currently the NetworkManager version
|
||||
# do this first so fixed ip is already allocated
|
||||
|
@ -378,7 +377,7 @@ class FloatingIP(object):
|
|||
self.l3driver.add_floating_ip(floating_address, fixed_address,
|
||||
interface, fixed['network'])
|
||||
except processutils.ProcessExecutionError as e:
|
||||
with excutils.save_and_reraise_exception() as exc_ctxt:
|
||||
with excutils.save_and_reraise_exception():
|
||||
try:
|
||||
floating_ip_obj.FloatingIP.disassociate(
|
||||
context, floating_address)
|
||||
|
|
|
@ -503,7 +503,7 @@ class NetworkManager(manager.Manager):
|
|||
try:
|
||||
self._allocate_mac_addresses(context, instance_uuid, networks,
|
||||
macs)
|
||||
except Exception as e:
|
||||
except Exception:
|
||||
with excutils.save_and_reraise_exception():
|
||||
# If we fail to allocate any one mac address, clean up all
|
||||
# allocated VIFs
|
||||
|
|
|
@ -261,7 +261,7 @@ class SecurityGroupAPI(security_group_base.SecurityGroupBase):
|
|||
# works.... :/
|
||||
for rule_id in range(0, len(rule_ids)):
|
||||
neutron.delete_security_group_rule(rule_ids.pop())
|
||||
except n_exc.NeutronClientException as e:
|
||||
except n_exc.NeutronClientException:
|
||||
with excutils.save_and_reraise_exception():
|
||||
LOG.exception(_("Neutron Error unable to delete %s"), rule_ids)
|
||||
|
||||
|
|
|
@ -156,7 +156,6 @@ class FilterScheduler(driver.Scheduler):
|
|||
'scheduler.run_instance.scheduled', payload)
|
||||
|
||||
# Update the metadata if necessary
|
||||
scheduler_hints = filter_properties.get('scheduler_hints') or {}
|
||||
try:
|
||||
updated_instance = driver.instance_update_db(context,
|
||||
instance_uuid)
|
||||
|
|
|
@ -84,7 +84,6 @@ class MemcachedDriver(api.ServiceGroupDriver):
|
|||
|
||||
def _report_state(self, service):
|
||||
"""Update the state of this service in the datastore."""
|
||||
ctxt = context.get_admin_context()
|
||||
try:
|
||||
key = "%(topic)s:%(host)s" % service.service_ref
|
||||
# memcached has data expiration time capability.
|
||||
|
|
|
@ -276,7 +276,7 @@ class TestCase(testtools.TestCase):
|
|||
|
||||
# Collect logs
|
||||
fs = '%(levelname)s [%(name)s] %(message)s'
|
||||
fake = self.useFixture(fixtures.FakeLogger(format=fs, level=None))
|
||||
self.useFixture(fixtures.FakeLogger(format=fs, level=None))
|
||||
root.handlers[0].setLevel(level)
|
||||
|
||||
if level > logging.DEBUG:
|
||||
|
|
|
@ -88,7 +88,7 @@ class CinderCloudTestCase(test.TestCase):
|
|||
def setUp(self):
|
||||
super(CinderCloudTestCase, self).setUp()
|
||||
ec2utils.reset_cache()
|
||||
vol_tmpdir = self.useFixture(fixtures.TempDir()).path
|
||||
self.useFixture(fixtures.TempDir()).path
|
||||
fake_utils.stub_out_utils_spawn_n(self.stubs)
|
||||
self.flags(compute_driver='nova.virt.fake.FakeDriver',
|
||||
volume_api_class='nova.tests.fake_volume.API')
|
||||
|
|
|
@ -471,7 +471,7 @@ class CloudTestCase(test.TestCase):
|
|||
name = 'test name %i' % i
|
||||
descript = 'test description %i' % i
|
||||
create = self.cloud.create_security_group
|
||||
result = create(self.context, name, descript)
|
||||
create(self.context, name, descript)
|
||||
|
||||
# 11'th group should fail
|
||||
self.assertRaises(exception.SecurityGroupLimitExceeded,
|
||||
|
@ -680,9 +680,8 @@ class CloudTestCase(test.TestCase):
|
|||
self.assertTrue(revoke(self.context, group_id=sec['id'], **kwargs))
|
||||
|
||||
def test_authorize_security_group_ingress_missing_protocol_params(self):
|
||||
sec = db.security_group_create(self.context,
|
||||
{'project_id': self.context.project_id,
|
||||
'name': 'test'})
|
||||
kwargs = {'project_id': self.context.project_id, 'name': 'test'}
|
||||
db.security_group_create(self.context, kwargs)
|
||||
authz = self.cloud.authorize_security_group_ingress
|
||||
self.assertRaises(exception.MissingParameter, authz, self.context,
|
||||
'test')
|
||||
|
@ -786,10 +785,10 @@ class CloudTestCase(test.TestCase):
|
|||
self.context, **kwargs)
|
||||
|
||||
def test_delete_security_group_in_use_by_group(self):
|
||||
group1 = self.cloud.create_security_group(self.context, 'testgrp1',
|
||||
"test group 1")
|
||||
group2 = self.cloud.create_security_group(self.context, 'testgrp2',
|
||||
"test group 2")
|
||||
self.cloud.create_security_group(self.context, 'testgrp1',
|
||||
"test group 1")
|
||||
self.cloud.create_security_group(self.context, 'testgrp2',
|
||||
"test group 2")
|
||||
kwargs = {'groups': {'1': {'user_id': u'%s' % self.context.user_id,
|
||||
'group_name': u'testgrp2'}},
|
||||
}
|
||||
|
@ -1348,14 +1347,14 @@ class CloudTestCase(test.TestCase):
|
|||
'host': 'host1',
|
||||
'vm_state': 'active',
|
||||
'system_metadata': sys_meta}
|
||||
inst1 = db.instance_create(self.context, args1)
|
||||
db.instance_create(self.context, args1)
|
||||
args2 = {'reservation_id': 'b',
|
||||
'image_ref': image_uuid,
|
||||
'instance_type_id': 1,
|
||||
'host': 'host1',
|
||||
'vm_state': 'active',
|
||||
'system_metadata': sys_meta}
|
||||
inst2 = db.instance_create(self.context, args2)
|
||||
db.instance_create(self.context, args2)
|
||||
result = self.cloud.describe_instances(self.context)
|
||||
self.assertEqual(len(result['reservationSet']), 2)
|
||||
|
||||
|
@ -1367,13 +1366,13 @@ class CloudTestCase(test.TestCase):
|
|||
image_uuid = 'cedef40a-ed67-4d10-800e-17455edce175'
|
||||
sys_meta = flavors.save_flavor_info(
|
||||
{}, flavors.get_flavor(1))
|
||||
inst1 = db.instance_create(self.context, {'reservation_id': 'a',
|
||||
'image_ref': image_uuid,
|
||||
'instance_type_id': 1,
|
||||
'host': 'host1',
|
||||
'hostname': 'server-1234',
|
||||
'vm_state': 'active',
|
||||
'system_metadata': sys_meta})
|
||||
db.instance_create(self.context, {'reservation_id': 'a',
|
||||
'image_ref': image_uuid,
|
||||
'instance_type_id': 1,
|
||||
'host': 'host1',
|
||||
'hostname': 'server-1234',
|
||||
'vm_state': 'active',
|
||||
'system_metadata': sys_meta})
|
||||
result = self.cloud.describe_instances(self.context)
|
||||
result = result['reservationSet'][0]
|
||||
instance = result['instancesSet'][0]
|
||||
|
@ -1728,13 +1727,13 @@ class CloudTestCase(test.TestCase):
|
|||
|
||||
self.stubs.Set(s3.S3ImageService, 'create', fake_create)
|
||||
self.expected_name = 'fake_bucket/fake.img.manifest.xml'
|
||||
result = register_image(self.context,
|
||||
image_location=self.expected_name,
|
||||
name=None)
|
||||
register_image(self.context,
|
||||
image_location=self.expected_name,
|
||||
name=None)
|
||||
self.expected_name = 'an image name'
|
||||
result = register_image(self.context,
|
||||
image_location='some_location',
|
||||
name=self.expected_name)
|
||||
register_image(self.context,
|
||||
image_location='some_location',
|
||||
name=self.expected_name)
|
||||
|
||||
def test_format_image(self):
|
||||
image = {
|
||||
|
@ -1818,7 +1817,7 @@ class CloudTestCase(test.TestCase):
|
|||
output = self.cloud.get_password_data(context=self.context,
|
||||
instance_id=[instance_id])
|
||||
self.assertEqual(output['passwordData'], 'fakepass')
|
||||
rv = self.cloud.terminate_instances(self.context, [instance_id])
|
||||
self.cloud.terminate_instances(self.context, [instance_id])
|
||||
|
||||
def test_console_output(self):
|
||||
instance_id = self._run_instance(
|
||||
|
@ -1831,7 +1830,7 @@ class CloudTestCase(test.TestCase):
|
|||
'FAKE CONSOLE OUTPUT\nANOTHER\nLAST LINE')
|
||||
# TODO(soren): We need this until we can stop polling in the rpc code
|
||||
# for unit tests.
|
||||
rv = self.cloud.terminate_instances(self.context, [instance_id])
|
||||
self.cloud.terminate_instances(self.context, [instance_id])
|
||||
|
||||
def test_key_generation(self):
|
||||
result, private_key = self._create_key('test')
|
||||
|
@ -1890,7 +1889,7 @@ class CloudTestCase(test.TestCase):
|
|||
dummypub = f.readline().rstrip()
|
||||
f.close
|
||||
f = open(pubkey_path + '/dummy.fingerprint', 'r')
|
||||
dummyfprint = f.readline().rstrip()
|
||||
f.readline().rstrip()
|
||||
f.close
|
||||
key_name = 'testimportkey'
|
||||
public_key_material = base64.b64encode(dummypub)
|
||||
|
@ -2345,7 +2344,7 @@ class CloudTestCase(test.TestCase):
|
|||
kwargs = {'image_id': 'ami-1',
|
||||
'instance_type': CONF.default_flavor,
|
||||
'max_count': 1, }
|
||||
instance_id = self._run_instance(**kwargs)
|
||||
self._run_instance(**kwargs)
|
||||
|
||||
self.assertRaises(exception.InstanceNotFound,
|
||||
self.cloud.terminate_instances,
|
||||
|
@ -2360,8 +2359,8 @@ class CloudTestCase(test.TestCase):
|
|||
|
||||
internal_uuid = db.get_instance_uuid_by_ec2_id(self.context,
|
||||
ec2utils.ec2_id_to_id(instance_id))
|
||||
instance = db.instance_update(self.context, internal_uuid,
|
||||
{'disable_terminate': True})
|
||||
db.instance_update(self.context, internal_uuid,
|
||||
{'disable_terminate': True})
|
||||
|
||||
expected = {'instancesSet': [
|
||||
{'instanceId': 'i-00000001',
|
||||
|
@ -2372,8 +2371,8 @@ class CloudTestCase(test.TestCase):
|
|||
result = self.cloud.terminate_instances(self.context, [instance_id])
|
||||
self.assertEqual(result, expected)
|
||||
|
||||
instance = db.instance_update(self.context, internal_uuid,
|
||||
{'disable_terminate': False})
|
||||
db.instance_update(self.context, internal_uuid,
|
||||
{'disable_terminate': False})
|
||||
|
||||
expected = {'instancesSet': [
|
||||
{'instanceId': 'i-00000001',
|
||||
|
|
|
@ -121,7 +121,7 @@ class ConsolesExtensionTest(test.NoDBTestCase):
|
|||
req.headers["content-type"] = "application/json"
|
||||
|
||||
res = req.get_response(self.app)
|
||||
output = jsonutils.loads(res.body)
|
||||
jsonutils.loads(res.body)
|
||||
self.assertEqual(res.status_int, 409)
|
||||
|
||||
def test_get_vnc_console_no_type(self):
|
||||
|
@ -207,7 +207,7 @@ class ConsolesExtensionTest(test.NoDBTestCase):
|
|||
req.headers["content-type"] = "application/json"
|
||||
|
||||
res = req.get_response(self.app)
|
||||
output = jsonutils.loads(res.body)
|
||||
jsonutils.loads(res.body)
|
||||
self.assertEqual(res.status_int, 409)
|
||||
|
||||
def test_get_spice_console_no_type(self):
|
||||
|
@ -292,7 +292,7 @@ class ConsolesExtensionTest(test.NoDBTestCase):
|
|||
req.headers["content-type"] = "application/json"
|
||||
|
||||
res = req.get_response(self.app)
|
||||
output = jsonutils.loads(res.body)
|
||||
jsonutils.loads(res.body)
|
||||
self.assertEqual(res.status_int, 409)
|
||||
|
||||
def test_get_rdp_console_no_type(self):
|
||||
|
|
|
@ -141,8 +141,6 @@ class FlavorAccessTest(test.NoDBTestCase):
|
|||
|
||||
def test_list_flavor_access_public(self):
|
||||
# query os-flavor-access on public flavor should return 404
|
||||
req = fakes.HTTPRequest.blank('/v2/fake/flavors/os-flavor-access',
|
||||
use_admin_context=True)
|
||||
self.assertRaises(exc.HTTPNotFound,
|
||||
self.flavor_access_controller.index,
|
||||
self.req, '1')
|
||||
|
@ -294,8 +292,6 @@ class FlavorAccessTest(test.NoDBTestCase):
|
|||
self.stubs.Set(db, 'flavor_access_add',
|
||||
stub_add_flavor_access)
|
||||
body = {'addTenantAccess': {'tenant': 'proj2'}}
|
||||
req = fakes.HTTPRequest.blank('/v2/fake/flavors/2/action',
|
||||
use_admin_context=True)
|
||||
self.assertRaises(exc.HTTPConflict,
|
||||
self.flavor_action_controller._addTenantAccess,
|
||||
self.req, '3', body)
|
||||
|
@ -307,8 +303,6 @@ class FlavorAccessTest(test.NoDBTestCase):
|
|||
self.stubs.Set(db, 'flavor_access_remove',
|
||||
stub_remove_flavor_access)
|
||||
body = {'removeTenantAccess': {'tenant': 'proj2'}}
|
||||
req = fakes.HTTPRequest.blank('/v2/fake/flavors/2/action',
|
||||
use_admin_context=True)
|
||||
self.assertRaises(exc.HTTPNotFound,
|
||||
self.flavor_action_controller._removeTenantAccess,
|
||||
self.req, '3', body)
|
||||
|
|
|
@ -418,7 +418,6 @@ class FloatingIpTest(test.TestCase):
|
|||
self.assertTrue(rsp.status_int == 202)
|
||||
|
||||
def test_floating_ip_associate_invalid_instance(self):
|
||||
fixed_address = '192.168.1.100'
|
||||
|
||||
def fake_get(self, context, id, expected_attrs=None,
|
||||
want_objects=False):
|
||||
|
|
|
@ -396,10 +396,10 @@ class ServersControllerCreateTest(test.TestCase):
|
|||
req.body = jsonutils.dumps(body)
|
||||
req.headers["content-type"] = "application/json"
|
||||
admin_context = context.get_admin_context()
|
||||
service1 = db.service_create(admin_context, {'host': 'host1_zones',
|
||||
'binary': "nova-compute",
|
||||
'topic': 'compute',
|
||||
'report_count': 0})
|
||||
db.service_create(admin_context, {'host': 'host1_zones',
|
||||
'binary': "nova-compute",
|
||||
'topic': 'compute',
|
||||
'report_count': 0})
|
||||
agg = db.aggregate_create(admin_context,
|
||||
{'name': 'agg1'}, {'availability_zone': 'nova'})
|
||||
db.aggregate_host_add(admin_context, agg['id'], 'host1_zones')
|
||||
|
|
|
@ -95,7 +95,7 @@ class EvacuateTest(test.NoDBTestCase):
|
|||
'on_shared_storage': 'False',
|
||||
'admin_password': 'MyNewPass'})
|
||||
res = req.get_response(app)
|
||||
res_dict = jsonutils.loads(res.body)
|
||||
jsonutils.loads(res.body)
|
||||
self.assertEqual(400, res.status_int)
|
||||
|
||||
def test_evacuate_instance_with_too_long_host(self):
|
||||
|
@ -104,7 +104,7 @@ class EvacuateTest(test.NoDBTestCase):
|
|||
'on_shared_storage': 'False',
|
||||
'admin_password': 'MyNewPass'})
|
||||
res = req.get_response(app)
|
||||
res_dict = jsonutils.loads(res.body)
|
||||
jsonutils.loads(res.body)
|
||||
self.assertEqual(400, res.status_int)
|
||||
|
||||
def test_evacuate_instance_with_invalid_characters_host(self):
|
||||
|
@ -113,7 +113,7 @@ class EvacuateTest(test.NoDBTestCase):
|
|||
'on_shared_storage': 'False',
|
||||
'admin_password': 'MyNewPass'})
|
||||
res = req.get_response(app)
|
||||
res_dict = jsonutils.loads(res.body)
|
||||
jsonutils.loads(res.body)
|
||||
self.assertEqual(400, res.status_int)
|
||||
|
||||
def test_evacuate_instance_with_invalid_on_shared_storage(self):
|
||||
|
@ -121,7 +121,7 @@ class EvacuateTest(test.NoDBTestCase):
|
|||
'on_shared_storage': 'foo',
|
||||
'admin_password': 'MyNewPass'})
|
||||
res = req.get_response(app)
|
||||
res_dict = jsonutils.loads(res.body)
|
||||
jsonutils.loads(res.body)
|
||||
self.assertEqual(400, res.status_int)
|
||||
|
||||
def test_evacuate_instance_without_on_shared_storage(self):
|
||||
|
|
|
@ -252,10 +252,10 @@ class ExtendedVolumesTest(test.TestCase):
|
|||
|
||||
def test_attach_volume_disk_bus_and_disk_dev(self):
|
||||
url = "/v3/servers/%s/action" % UUID1
|
||||
res = self._make_request(url, {"attach": {"volume_id": UUID1,
|
||||
"device": "/dev/vdb",
|
||||
"disk_bus": "ide",
|
||||
"device_type": "cdrom"}})
|
||||
self._make_request(url, {"attach": {"volume_id": UUID1,
|
||||
"device": "/dev/vdb",
|
||||
"disk_bus": "ide",
|
||||
"device_type": "cdrom"}})
|
||||
|
||||
def test_attach_volume_with_bad_id(self):
|
||||
url = "/v3/servers/%s/action" % UUID1
|
||||
|
|
|
@ -140,8 +140,6 @@ class FlavorAccessTest(test.NoDBTestCase):
|
|||
|
||||
def test_list_flavor_access_public(self):
|
||||
# query flavor-access on public flavor should return 404
|
||||
req = fakes.HTTPRequestV3.blank('/flavors/fake/flavor-access',
|
||||
use_admin_context=True)
|
||||
self.assertRaises(exc.HTTPNotFound,
|
||||
self.flavor_access_controller.index,
|
||||
self.req, '1')
|
||||
|
|
|
@ -105,7 +105,7 @@ class KeypairsTest(test.TestCase):
|
|||
req.headers['Content-Type'] = 'application/json'
|
||||
res = req.get_response(self.app)
|
||||
self.assertEqual(res.status_int, 400)
|
||||
res_dict = jsonutils.loads(res.body)
|
||||
jsonutils.loads(res.body)
|
||||
|
||||
def test_keypair_create_without_name(self):
|
||||
body = {'keypair': {'public_key': 'public key'}}
|
||||
|
@ -375,7 +375,7 @@ class KeypairsTest(test.TestCase):
|
|||
req.body = jsonutils.dumps(body)
|
||||
req.headers['Content-Type'] = 'application/json'
|
||||
res = req.get_response(self.app)
|
||||
res_dict = jsonutils.loads(res.body)
|
||||
jsonutils.loads(res.body)
|
||||
self.assertEqual(res.status_int, 400)
|
||||
|
||||
|
||||
|
|
|
@ -155,7 +155,6 @@ class ServersControllerCreateTest(test.TestCase):
|
|||
server = self.controller.create(req, body=body).obj['server']
|
||||
|
||||
def test_create_instance_with_multiple_create_disabled(self):
|
||||
ret_res_id = True
|
||||
min_count = 2
|
||||
max_count = 3
|
||||
params = {
|
||||
|
|
|
@ -134,7 +134,7 @@ class ConsolesExtensionTest(test.NoDBTestCase):
|
|||
req.headers["content-type"] = "application/json"
|
||||
|
||||
res = req.get_response(self.app)
|
||||
output = jsonutils.loads(res.body)
|
||||
jsonutils.loads(res.body)
|
||||
self.assertEqual(res.status_int, 409)
|
||||
|
||||
def test_get_vnc_console_no_type(self):
|
||||
|
@ -245,7 +245,7 @@ class ConsolesExtensionTest(test.NoDBTestCase):
|
|||
req.headers["content-type"] = "application/json"
|
||||
|
||||
res = req.get_response(self.app)
|
||||
output = jsonutils.loads(res.body)
|
||||
jsonutils.loads(res.body)
|
||||
self.assertEqual(res.status_int, 409)
|
||||
|
||||
def test_get_spice_console_no_type(self):
|
||||
|
@ -330,7 +330,7 @@ class ConsolesExtensionTest(test.NoDBTestCase):
|
|||
req.headers["content-type"] = "application/json"
|
||||
|
||||
res = req.get_response(self.app)
|
||||
output = jsonutils.loads(res.body)
|
||||
jsonutils.loads(res.body)
|
||||
self.assertEqual(res.status_int, 409)
|
||||
|
||||
def test_get_rdp_console_no_type(self):
|
||||
|
|
|
@ -1277,7 +1277,7 @@ class ServersControllerTest(ControllerTest):
|
|||
self.stubs.Set(compute_api.API, 'get_all', fake_get_all)
|
||||
|
||||
req = fakes.HTTPRequestV3.blank('/servers', use_admin_context=True)
|
||||
servers = self.controller.index(req)['servers']
|
||||
self.assertIn('servers', self.controller.index(req))
|
||||
self.assertIn('pci_devices', self.expected_attrs)
|
||||
|
||||
|
||||
|
@ -2390,7 +2390,7 @@ class ServersControllerCreateTest(test.TestCase):
|
|||
self.req.body = jsonutils.dumps(self.body)
|
||||
res = self.controller.create(self.req, body=self.body).obj
|
||||
|
||||
server = res['server']
|
||||
self.assertIn('server', res)
|
||||
self.assertIn('admin_password', self.body['server'])
|
||||
|
||||
def test_create_instance_admin_password_empty(self):
|
||||
|
|
|
@ -372,7 +372,7 @@ class ServicesTest(test.TestCase):
|
|||
|
||||
with mock.patch.object(self.controller.host_api,
|
||||
'service_delete') as service_delete:
|
||||
response = self.controller.delete(request, '1')
|
||||
self.controller.delete(request, '1')
|
||||
service_delete.assert_called_once_with(
|
||||
request.environ['nova.context'], '1')
|
||||
self.assertEqual(self.controller.delete.wsgi_code, 204)
|
||||
|
|
|
@ -60,7 +60,7 @@ class APITest(test.NoDBTestCase):
|
|||
self.assertEqual(res.status_int, 200)
|
||||
self.assertEqual(res.content_type, ctype)
|
||||
|
||||
body = jsonutils.loads(res.body)
|
||||
jsonutils.loads(res.body)
|
||||
|
||||
def test_vendor_content_type_xml(self):
|
||||
ctype = 'application/vnd.openstack.compute+xml'
|
||||
|
@ -72,7 +72,7 @@ class APITest(test.NoDBTestCase):
|
|||
self.assertEqual(res.status_int, 200)
|
||||
self.assertEqual(res.content_type, ctype)
|
||||
|
||||
body = etree.XML(res.body)
|
||||
etree.XML(res.body)
|
||||
|
||||
def test_exceptions_are_converted_to_faults_webob_exc(self):
|
||||
@webob.dec.wsgify
|
||||
|
@ -177,8 +177,6 @@ class APITest(test.NoDBTestCase):
|
|||
class ExceptionWithNoneCode(Exception):
|
||||
code = None
|
||||
|
||||
msg = 'Internal Server Error'
|
||||
|
||||
@webob.dec.wsgify
|
||||
def fail(req):
|
||||
raise ExceptionWithNoneCode()
|
||||
|
|
|
@ -437,7 +437,7 @@ class ExtensionManagerTest(ExtensionTestCase):
|
|||
def test_invalid_extensions(self):
|
||||
# Don't need the serialization middleware here because we're
|
||||
# not testing any serialization
|
||||
app = compute.APIRouter()
|
||||
compute.APIRouter()
|
||||
ext_mgr = compute_extensions.ExtensionManager()
|
||||
ext_mgr.register(InvalidExtension())
|
||||
self.assertTrue(ext_mgr.is_loaded('FOXNSOX'))
|
||||
|
|
|
@ -242,8 +242,6 @@ class FlavorsTest(test.TestCase):
|
|||
response_list = response["flavors"]
|
||||
response_links = response["flavors_links"]
|
||||
|
||||
alternate = "%s/fake/flavors/%s"
|
||||
|
||||
expected_flavors = [
|
||||
{
|
||||
"id": "1",
|
||||
|
@ -275,8 +273,6 @@ class FlavorsTest(test.TestCase):
|
|||
response_list = response["flavors"]
|
||||
response_links = response["flavors_links"]
|
||||
|
||||
alternate = "%s/fake/flavors/%s"
|
||||
|
||||
expected_flavors = [
|
||||
{
|
||||
"id": "1",
|
||||
|
|
|
@ -317,7 +317,6 @@ class ImagesControllerTest(test.NoDBTestCase):
|
|||
self._detail_request(filters, request)
|
||||
|
||||
def test_image_detail_filter_server_href(self):
|
||||
ref = 'http://localhost:8774/servers/' + self.uuid
|
||||
filters = {'property-instance_uuid': self.uuid}
|
||||
request = fakes.HTTPRequest.blank(self.url)
|
||||
self._detail_request(filters, request)
|
||||
|
|
|
@ -684,7 +684,6 @@ class WsgiLimiterTest(BaseLimitTestSuite):
|
|||
|
||||
def test_invalid_methods(self):
|
||||
# Only POSTs should work.
|
||||
requests = []
|
||||
for method in ["GET", "PUT", "DELETE", "HEAD", "OPTIONS"]:
|
||||
request = webob.Request.blank("/", method=method)
|
||||
response = request.get_response(self.app)
|
||||
|
|
|
@ -2101,7 +2101,8 @@ class ServersControllerCreateTest(test.TestCase):
|
|||
self.body['server'].pop('imageRef', None)
|
||||
self.body['server'].update(params)
|
||||
self.req.body = jsonutils.dumps(self.body)
|
||||
server = self.controller.create(self.req, self.body).obj['server']
|
||||
self.assertIn('server',
|
||||
self.controller.create(self.req, self.body).obj)
|
||||
|
||||
def test_create_instance_with_security_group_enabled(self):
|
||||
self.ext_mgr.extensions = {'os-security-groups': 'fake'}
|
||||
|
@ -2218,7 +2219,6 @@ class ServersControllerCreateTest(test.TestCase):
|
|||
|
||||
def test_create_instance_name_all_blank_spaces(self):
|
||||
# proper local hrefs must start with 'http://localhost/v2/'
|
||||
image_href = 'http://localhost/v2/images/%s' % self.image_uuid
|
||||
self.body['server']['name'] = ' ' * 64
|
||||
self.req.body = jsonutils.dumps(self.body)
|
||||
self.assertRaises(webob.exc.HTTPBadRequest,
|
||||
|
@ -2453,7 +2453,8 @@ class ServersControllerCreateTest(test.TestCase):
|
|||
self.body['server']['flavorRef'] = 3
|
||||
self.req.body = jsonutils.dumps(self.body)
|
||||
try:
|
||||
server = self.controller.create(self.req, self.body).obj['server']
|
||||
self.assertIn('server',
|
||||
self.controller.create(self.req, self.body).obj)
|
||||
self.fail('expected quota to be exceeded')
|
||||
except webob.exc.HTTPRequestEntityTooLarge as e:
|
||||
self.assertEqual(e.explanation, expected_msg)
|
||||
|
@ -2992,10 +2993,10 @@ class ServersControllerCreateTest(test.TestCase):
|
|||
expected = 'The requested availability zone is not available'
|
||||
self.assertEqual(e.explanation, expected)
|
||||
admin_context = context.get_admin_context()
|
||||
service1 = db.service_create(admin_context, {'host': 'host1_zones',
|
||||
'binary': "nova-compute",
|
||||
'topic': 'compute',
|
||||
'report_count': 0})
|
||||
db.service_create(admin_context, {'host': 'host1_zones',
|
||||
'binary': "nova-compute",
|
||||
'topic': 'compute',
|
||||
'report_count': 0})
|
||||
agg = db.aggregate_create(admin_context,
|
||||
{'name': 'agg1'}, {'availability_zone': availability_zone})
|
||||
db.aggregate_host_add(admin_context, agg['id'], 'host1_zones')
|
||||
|
@ -3032,7 +3033,6 @@ class ServersControllerCreateTest(test.TestCase):
|
|||
self._test_create_extra(params)
|
||||
|
||||
def test_create_instance_with_multiple_create_disabled(self):
|
||||
ret_res_id = True
|
||||
min_count = 2
|
||||
max_count = 3
|
||||
params = {
|
||||
|
@ -3188,7 +3188,7 @@ class ServersControllerCreateTest(test.TestCase):
|
|||
self.body['server']['flavorRef'] = 3
|
||||
self.req.body = jsonutils.dumps(self.body)
|
||||
try:
|
||||
server = self.controller.create(self.req, self.body).obj['server']
|
||||
self.controller.create(self.req, self.body).obj['server']
|
||||
self.fail('expected quota to be exceeded')
|
||||
except webob.exc.HTTPRequestEntityTooLarge as e:
|
||||
self.assertEqual(e.explanation, expected_msg)
|
||||
|
|
|
@ -267,7 +267,6 @@ def _make_image_fixtures():
|
|||
|
||||
# Snapshot for User 1
|
||||
uuid = 'aa640691-d1a7-4a67-9d3c-d35ee6b3cc74'
|
||||
server_ref = 'http://localhost/v2/servers/' + uuid
|
||||
snapshot_properties = {'instance_uuid': uuid, 'user_id': 'fake'}
|
||||
for status in ('queued', 'saving', 'active', 'killed',
|
||||
'deleted', 'pending_delete'):
|
||||
|
|
|
@ -644,9 +644,9 @@ class ResourceTest(test.NoDBTestCase):
|
|||
return {'foo': 'bar'}
|
||||
|
||||
req = fakes.HTTPRequest.blank('/tests')
|
||||
context = req.environ['nova.context']
|
||||
app = fakes.TestRouter(Controller())
|
||||
response = req.get_response(app)
|
||||
self.assertIn('nova.context', req.environ)
|
||||
self.assertEqual(response.body, '{"foo": "bar"}')
|
||||
self.assertEqual(response.status_int, 200)
|
||||
|
||||
|
@ -672,9 +672,9 @@ class ResourceTest(test.NoDBTestCase):
|
|||
pass
|
||||
|
||||
req = fakes.HTTPRequest.blank('/tests')
|
||||
context = req.environ['nova.context']
|
||||
app = fakes.TestRouter(Controller())
|
||||
response = req.get_response(app)
|
||||
self.assertIn('nova.context', req.environ)
|
||||
self.assertEqual(response.body, '')
|
||||
self.assertEqual(response.status_int, 200)
|
||||
|
||||
|
@ -886,12 +886,12 @@ class ResourceTest(test.NoDBTestCase):
|
|||
|
||||
def extension1(req):
|
||||
called.append('pre1')
|
||||
resp_obj = yield
|
||||
yield
|
||||
called.append('post1')
|
||||
|
||||
def extension2(req):
|
||||
called.append('pre2')
|
||||
resp_obj = yield
|
||||
yield
|
||||
called.append('post2')
|
||||
|
||||
extensions = [extension1, extension2]
|
||||
|
@ -991,11 +991,11 @@ class ResourceTest(test.NoDBTestCase):
|
|||
called = []
|
||||
|
||||
def extension1(req):
|
||||
resp_obj = yield
|
||||
yield
|
||||
called.append(1)
|
||||
|
||||
def extension2(req):
|
||||
resp_obj = yield
|
||||
yield
|
||||
called.append(2)
|
||||
|
||||
ext1 = extension1(None)
|
||||
|
@ -1020,11 +1020,11 @@ class ResourceTest(test.NoDBTestCase):
|
|||
called = []
|
||||
|
||||
def extension1(req):
|
||||
resp_obj = yield
|
||||
yield
|
||||
called.append(1)
|
||||
|
||||
def extension2(req):
|
||||
resp_obj = yield
|
||||
yield
|
||||
called.append(2)
|
||||
yield 'foo'
|
||||
|
||||
|
@ -1217,19 +1217,19 @@ class ValidBodyTest(test.NoDBTestCase):
|
|||
self.assertTrue(self.controller.is_valid_body(body, 'foo'))
|
||||
|
||||
def test_is_valid_body_none(self):
|
||||
resource = wsgi.Resource(controller=None)
|
||||
wsgi.Resource(controller=None)
|
||||
self.assertFalse(self.controller.is_valid_body(None, 'foo'))
|
||||
|
||||
def test_is_valid_body_empty(self):
|
||||
resource = wsgi.Resource(controller=None)
|
||||
wsgi.Resource(controller=None)
|
||||
self.assertFalse(self.controller.is_valid_body({}, 'foo'))
|
||||
|
||||
def test_is_valid_body_no_entity(self):
|
||||
resource = wsgi.Resource(controller=None)
|
||||
wsgi.Resource(controller=None)
|
||||
body = {'bar': {}}
|
||||
self.assertFalse(self.controller.is_valid_body(body, 'foo'))
|
||||
|
||||
def test_is_valid_body_malformed_entity(self):
|
||||
resource = wsgi.Resource(controller=None)
|
||||
wsgi.Resource(controller=None)
|
||||
body = {'foo': 'bar'}
|
||||
self.assertFalse(self.controller.is_valid_body(body, 'foo'))
|
||||
|
|
|
@ -72,10 +72,6 @@ class FakeMonitorClass3(monitors.ResourceMonitorBase):
|
|||
|
||||
class FakeMonitorClass4(monitors.ResourceMonitorBase):
|
||||
def get_metrics(self, **kwargs):
|
||||
data = [{'timestamp': 123,
|
||||
'name': 'key4',
|
||||
'value': 1600,
|
||||
'source': 'libvirt'}]
|
||||
raise test.TestingException()
|
||||
|
||||
def get_metric_names(self):
|
||||
|
|
|
@ -102,26 +102,26 @@ class ClaimTestCase(test.NoDBTestCase):
|
|||
self.assertTrue(re.search(re_obj, str(ee)))
|
||||
|
||||
def test_cpu_unlimited(self):
|
||||
claim = self._claim(vcpus=100000)
|
||||
self._claim(vcpus=100000)
|
||||
|
||||
def test_memory_unlimited(self):
|
||||
claim = self._claim(memory_mb=99999999)
|
||||
self._claim(memory_mb=99999999)
|
||||
|
||||
def test_disk_unlimited_root(self):
|
||||
claim = self._claim(root_gb=999999)
|
||||
self._claim(root_gb=999999)
|
||||
|
||||
def test_disk_unlimited_ephemeral(self):
|
||||
claim = self._claim(ephemeral_gb=999999)
|
||||
self._claim(ephemeral_gb=999999)
|
||||
|
||||
def test_cpu_oversubscription(self):
|
||||
limits = {'vcpu': 16}
|
||||
claim = self._claim(limits, vcpus=8)
|
||||
self._claim(limits, vcpus=8)
|
||||
|
||||
def test_memory_with_overhead(self):
|
||||
overhead = {'memory_mb': 8}
|
||||
limits = {'memory_mb': 2048}
|
||||
claim = self._claim(memory_mb=2040, limits=limits,
|
||||
overhead=overhead)
|
||||
self._claim(memory_mb=2040, limits=limits,
|
||||
overhead=overhead)
|
||||
|
||||
def test_memory_with_overhead_insufficient(self):
|
||||
overhead = {'memory_mb': 9}
|
||||
|
@ -137,8 +137,7 @@ class ClaimTestCase(test.NoDBTestCase):
|
|||
self._claim, limits=limits, vcpus=17)
|
||||
|
||||
def test_memory_oversubscription(self):
|
||||
limits = {'memory_mb': 8192}
|
||||
claim = self._claim(memory_mb=4096)
|
||||
self._claim(memory_mb=4096)
|
||||
|
||||
def test_memory_insufficient(self):
|
||||
limits = {'memory_mb': 8192}
|
||||
|
@ -147,8 +146,8 @@ class ClaimTestCase(test.NoDBTestCase):
|
|||
|
||||
def test_disk_oversubscription(self):
|
||||
limits = {'disk_gb': 60}
|
||||
claim = self._claim(root_gb=10, ephemeral_gb=40,
|
||||
limits=limits)
|
||||
self._claim(root_gb=10, ephemeral_gb=40,
|
||||
limits=limits)
|
||||
|
||||
def test_disk_insufficient(self):
|
||||
limits = {'disk_gb': 45}
|
||||
|
|
|
@ -402,8 +402,7 @@ class ComputeVolumeTestCase(BaseTestCase):
|
|||
def test_attach_volume_serial(self):
|
||||
fake_bdm = block_device_obj.BlockDeviceMapping(**self.fake_volume)
|
||||
with (mock.patch.object(cinder.API, 'get_volume_encryption_metadata',
|
||||
return_value={})
|
||||
) as mock_get_vol_enc:
|
||||
return_value={})):
|
||||
instance = self._create_fake_instance()
|
||||
self.compute.attach_volume(self.context, self.volume_id,
|
||||
'/dev/vdb', instance, bdm=fake_bdm)
|
||||
|
@ -4171,7 +4170,6 @@ class ComputeTestCase(BaseTestCase):
|
|||
orig_mig_save = migration.save
|
||||
orig_inst_save = instance.save
|
||||
network_api = self.compute.network_api
|
||||
conductor_api = self.compute.conductor_api
|
||||
|
||||
self.mox.StubOutWithMock(network_api, 'setup_networks_on_host')
|
||||
self.mox.StubOutWithMock(network_api,
|
||||
|
@ -5227,7 +5225,6 @@ class ComputeTestCase(BaseTestCase):
|
|||
# Confirm exception when pre_live_migration fails.
|
||||
c = context.get_admin_context()
|
||||
|
||||
src_host = 'fake-src-host'
|
||||
instance = self._create_fake_instance_obj(
|
||||
{'host': 'src_host',
|
||||
'task_state': task_states.MIGRATING})
|
||||
|
@ -5798,7 +5795,7 @@ class ComputeTestCase(BaseTestCase):
|
|||
|
||||
with mock.patch.object(
|
||||
self.compute, '_get_instances_on_driver',
|
||||
return_value=[instance]) as _get_instances_on_driver:
|
||||
return_value=[instance]):
|
||||
try:
|
||||
# We cannot simply use an assertRaises here because the
|
||||
# exception raised is too generally "Exception". To be sure
|
||||
|
@ -6779,9 +6776,9 @@ class ComputeTestCase(BaseTestCase):
|
|||
'instance_uuid': instance.uuid})
|
||||
bdm.create(self.context)
|
||||
|
||||
dev = self.compute.reserve_block_device_name(
|
||||
self.context, instance, '/dev/vdb',
|
||||
'fake-volume-id', 'virtio', 'disk')
|
||||
self.compute.reserve_block_device_name(self.context, instance,
|
||||
'/dev/vdb', 'fake-volume-id',
|
||||
'virtio', 'disk')
|
||||
|
||||
bdms = block_device_obj.BlockDeviceMappingList.get_by_instance_uuid(
|
||||
self.context, instance.uuid)
|
||||
|
@ -7216,7 +7213,6 @@ class ComputeAPITestCase(BaseTestCase):
|
|||
|
||||
# Make sure Compute API updates the image_ref before casting to
|
||||
# compute manager.
|
||||
orig_update = self.compute_api.update
|
||||
info = {'image_ref': None, 'clean': False}
|
||||
|
||||
def fake_rpc_rebuild(context, **kwargs):
|
||||
|
@ -7877,13 +7873,13 @@ class ComputeAPITestCase(BaseTestCase):
|
|||
db.instance_destroy(c, instance4['uuid'])
|
||||
|
||||
def test_all_instance_metadata(self):
|
||||
instance1 = self._create_fake_instance({'metadata': {'key1': 'value1'},
|
||||
'user_id': 'user1',
|
||||
'project_id': 'project1'})
|
||||
self._create_fake_instance({'metadata': {'key1': 'value1'},
|
||||
'user_id': 'user1',
|
||||
'project_id': 'project1'})
|
||||
|
||||
instance2 = self._create_fake_instance({'metadata': {'key2': 'value2'},
|
||||
'user_id': 'user2',
|
||||
'project_id': 'project2'})
|
||||
self._create_fake_instance({'metadata': {'key2': 'value2'},
|
||||
'user_id': 'user2',
|
||||
'project_id': 'project2'})
|
||||
|
||||
_context = self.context
|
||||
_context.user_id = 'user1'
|
||||
|
@ -9446,8 +9442,8 @@ class ComputeAPIAggrTestCase(BaseTestCase):
|
|||
values = _create_service_entries(self.context)
|
||||
fake_zone = values.keys()[0]
|
||||
fake_host = values[fake_zone][0]
|
||||
aggr1 = self._init_aggregate_with_host(None, 'fake_aggregate1',
|
||||
fake_zone, fake_host)
|
||||
self._init_aggregate_with_host(None, 'fake_aggregate1',
|
||||
fake_zone, fake_host)
|
||||
aggr2 = self._init_aggregate_with_host(None, 'fake_aggregate2', None,
|
||||
fake_host)
|
||||
metadata = {'name': 'new_fake_aggregate'}
|
||||
|
@ -9471,8 +9467,8 @@ class ComputeAPIAggrTestCase(BaseTestCase):
|
|||
fake_host = values[fake_zone][0]
|
||||
aggr1 = self._init_aggregate_with_host(None, 'fake_aggregate1',
|
||||
fake_zone, fake_host)
|
||||
aggr2 = self._init_aggregate_with_host(None, 'fake_aggregate2', None,
|
||||
fake_host)
|
||||
self._init_aggregate_with_host(None, 'fake_aggregate2', None,
|
||||
fake_host)
|
||||
metadata = {'availability_zone': 'new_fake_zone'}
|
||||
fake_notifier.NOTIFICATIONS = []
|
||||
aggr1 = self.api.update_aggregate(self.context, aggr1['id'],
|
||||
|
@ -9492,8 +9488,8 @@ class ComputeAPIAggrTestCase(BaseTestCase):
|
|||
values = _create_service_entries(self.context)
|
||||
fake_zone = values.keys()[0]
|
||||
fake_host = values[fake_zone][0]
|
||||
aggr1 = self._init_aggregate_with_host(None, 'fake_aggregate1',
|
||||
fake_zone, fake_host)
|
||||
self._init_aggregate_with_host(None, 'fake_aggregate1',
|
||||
fake_zone, fake_host)
|
||||
aggr2 = self._init_aggregate_with_host(None, 'fake_aggregate2', None,
|
||||
fake_host)
|
||||
metadata = {'availability_zone': 'another_zone'}
|
||||
|
@ -9521,9 +9517,9 @@ class ComputeAPIAggrTestCase(BaseTestCase):
|
|||
values = _create_service_entries(self.context)
|
||||
fake_zone = values.keys()[0]
|
||||
fake_host = values[fake_zone][0]
|
||||
aggr1 = self._init_aggregate_with_host(None, 'fake_aggregate1',
|
||||
CONF.default_availability_zone,
|
||||
fake_host)
|
||||
self._init_aggregate_with_host(None, 'fake_aggregate1',
|
||||
CONF.default_availability_zone,
|
||||
fake_host)
|
||||
aggr2 = self._init_aggregate_with_host(None, 'fake_aggregate2', None,
|
||||
fake_host)
|
||||
metadata = {'availability_zone': 'another_zone'}
|
||||
|
@ -9575,8 +9571,8 @@ class ComputeAPIAggrTestCase(BaseTestCase):
|
|||
values = _create_service_entries(self.context)
|
||||
fake_zone = values.keys()[0]
|
||||
fake_host = values[fake_zone][0]
|
||||
aggr1 = self._init_aggregate_with_host(None, 'fake_aggregate1',
|
||||
fake_zone, fake_host)
|
||||
self._init_aggregate_with_host(None, 'fake_aggregate1',
|
||||
fake_zone, fake_host)
|
||||
aggr2 = self._init_aggregate_with_host(None, 'fake_aggregate2', None,
|
||||
fake_host)
|
||||
metadata = {'foo_key2': 'foo_value3'}
|
||||
|
@ -9602,8 +9598,8 @@ class ComputeAPIAggrTestCase(BaseTestCase):
|
|||
fake_host = values[fake_zone][0]
|
||||
aggr1 = self._init_aggregate_with_host(None, 'fake_aggregate1',
|
||||
fake_zone, fake_host)
|
||||
aggr2 = self._init_aggregate_with_host(None, 'fake_aggregate2', None,
|
||||
fake_host)
|
||||
self._init_aggregate_with_host(None, 'fake_aggregate2', None,
|
||||
fake_host)
|
||||
metadata = {'availability_zone': 'new_fake_zone'}
|
||||
fake_notifier.NOTIFICATIONS = []
|
||||
aggr1 = self.api.update_aggregate_metadata(self.context,
|
||||
|
@ -9623,15 +9619,14 @@ class ComputeAPIAggrTestCase(BaseTestCase):
|
|||
values = _create_service_entries(self.context)
|
||||
fake_zone = values.keys()[0]
|
||||
fake_host = values[fake_zone][0]
|
||||
aggr1 = self._init_aggregate_with_host(None, 'fake_aggregate1',
|
||||
fake_zone, fake_host)
|
||||
self._init_aggregate_with_host(None, 'fake_aggregate1',
|
||||
fake_zone, fake_host)
|
||||
aggr2 = self._init_aggregate_with_host(None, 'fake_aggregate2', None,
|
||||
fake_host)
|
||||
metadata = {'availability_zone': 'another_zone'}
|
||||
self.assertRaises(exception.InvalidAggregateAction,
|
||||
self.api.update_aggregate_metadata,
|
||||
self.context, aggr2['id'], metadata)
|
||||
fake_host2 = values[fake_zone][1]
|
||||
aggr3 = self._init_aggregate_with_host(None, 'fake_aggregate3',
|
||||
None, fake_host)
|
||||
metadata = {'availability_zone': fake_zone}
|
||||
|
|
|
@ -30,7 +30,6 @@ from nova import db
|
|||
from nova import exception
|
||||
from nova.network import model as network_model
|
||||
from nova import objects
|
||||
from nova.objects import base as obj_base
|
||||
from nova.objects import block_device as block_device_obj
|
||||
from nova.objects import external_event as external_event_obj
|
||||
from nova.objects import instance_action as instance_action_obj
|
||||
|
@ -974,7 +973,6 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase):
|
|||
|
||||
def test_check_can_live_migrate_source(self):
|
||||
is_volume_backed = 'volume_backed'
|
||||
bdms = 'bdms'
|
||||
dest_check_data = dict(foo='bar')
|
||||
db_instance = fake_instance.fake_db_instance()
|
||||
instance = objects.Instance._from_db_object(
|
||||
|
@ -987,7 +985,6 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase):
|
|||
self.mox.StubOutWithMock(self.compute.driver,
|
||||
'check_can_live_migrate_source')
|
||||
|
||||
instance_p = obj_base.obj_to_primitive(instance)
|
||||
self.compute.compute_api.is_volume_backed_instance(
|
||||
self.context, instance).AndReturn(is_volume_backed)
|
||||
self.compute.driver.check_can_live_migrate_source(
|
||||
|
|
|
@ -206,8 +206,8 @@ class GetKeypairsTestCase(KeypairAPITestCase):
|
|||
|
||||
class DeleteKeypairTestCase(KeypairAPITestCase):
|
||||
def test_success(self):
|
||||
keypair = self.keypair_api.get_key_pair(self.ctxt, self.ctxt.user_id,
|
||||
self.existing_key_name)
|
||||
self.keypair_api.get_key_pair(self.ctxt, self.ctxt.user_id,
|
||||
self.existing_key_name)
|
||||
self.keypair_api.delete_key_pair(self.ctxt, self.ctxt.user_id,
|
||||
self.existing_key_name)
|
||||
self.assertRaises(exception.KeypairNotFound,
|
||||
|
|
|
@ -110,7 +110,7 @@ class FakeVirtDriver(driver.ComputeDriver):
|
|||
return d
|
||||
|
||||
def estimate_instance_overhead(self, instance_info):
|
||||
mem = instance_info['memory_mb'] # make sure memory value is present
|
||||
instance_info['memory_mb'] # make sure memory value is present
|
||||
overhead = {
|
||||
'memory_mb': FAKE_VIRT_MEMORY_OVERHEAD
|
||||
}
|
||||
|
@ -601,8 +601,7 @@ class InstanceClaimTestCase(BaseTrackerTestCase):
|
|||
instance = self._fake_instance(memory_mb=claim_mem, root_gb=claim_disk,
|
||||
ephemeral_gb=0)
|
||||
|
||||
claim = self.tracker.instance_claim(self.context, instance,
|
||||
self.limits)
|
||||
self.tracker.instance_claim(self.context, instance, self.limits)
|
||||
|
||||
self.assertEqual(FAKE_VIRT_MEMORY_MB, self.compute["memory_mb"])
|
||||
self.assertEqual(claim_mem_total, self.compute["memory_mb_used"])
|
||||
|
@ -948,7 +947,7 @@ class ResizeClaimTestCase(BaseTrackerTestCase):
|
|||
'old_instance_type_id': 1, 'new_instance_type_id': 1,
|
||||
'status': 'post-migrating',
|
||||
'instance_uuid': self.instance['uuid']}
|
||||
migration = self._fake_migration_create(self.context, values)
|
||||
self._fake_migration_create(self.context, values)
|
||||
|
||||
# attach an instance to the destination host tracker:
|
||||
dest_tracker.instance_claim(self.context, self.instance)
|
||||
|
|
|
@ -387,33 +387,6 @@ def stub_out_db_instance_api(stubs, injected=True):
|
|||
rxtx_factor=1.0,
|
||||
swap=0)}
|
||||
|
||||
flat_network_fields = {'id': 'fake_flat',
|
||||
'bridge': 'xenbr0',
|
||||
'label': 'fake_flat_network',
|
||||
'netmask': '255.255.255.0',
|
||||
'cidr_v6': 'fe80::a00:0/120',
|
||||
'netmask_v6': '120',
|
||||
'gateway': '10.0.0.1',
|
||||
'gateway_v6': 'fe80::a00:1',
|
||||
'broadcast': '10.0.0.255',
|
||||
'dns': '10.0.0.2',
|
||||
'ra_server': None,
|
||||
'injected': injected}
|
||||
|
||||
vlan_network_fields = {'id': 'fake_vlan',
|
||||
'bridge': 'br111',
|
||||
'label': 'fake_vlan_network',
|
||||
'netmask': '255.255.255.0',
|
||||
'cidr_v6': 'fe80::a00:0/120',
|
||||
'netmask_v6': '120',
|
||||
'gateway': '10.0.0.1',
|
||||
'gateway_v6': 'fe80::a00:1',
|
||||
'broadcast': '10.0.0.255',
|
||||
'dns': '10.0.0.2',
|
||||
'ra_server': None,
|
||||
'vlan': 111,
|
||||
'injected': False}
|
||||
|
||||
fixed_ip_fields = {'address': '10.0.0.3',
|
||||
'address_v6': 'fe80::a00:3',
|
||||
'network_id': 'fake_flat'}
|
||||
|
|
|
@ -362,9 +362,9 @@ class AggregateDBApiTestCase(test.TestCase):
|
|||
a1 = _create_aggregate_with_hosts(context=ctxt)
|
||||
a2 = _create_aggregate_with_hosts(context=ctxt, values=values2)
|
||||
# a3 has no hosts and should not be in the results.
|
||||
a3 = _create_aggregate(context=ctxt, values=values3)
|
||||
_create_aggregate(context=ctxt, values=values3)
|
||||
# a4 has no matching hosts.
|
||||
a4 = _create_aggregate_with_hosts(context=ctxt, values=values4,
|
||||
_create_aggregate_with_hosts(context=ctxt, values=values4,
|
||||
hosts=['foo4.openstack.org'])
|
||||
# a5 has no matching hosts after deleting the only matching host.
|
||||
a5 = _create_aggregate_with_hosts(context=ctxt, values=values5,
|
||||
|
@ -423,8 +423,8 @@ class AggregateDBApiTestCase(test.TestCase):
|
|||
a2_metadata = {'good': 'value12', 'bad': 'badvalue12'}
|
||||
a3_hosts = ['foo2.openstack.org', 'foo3.openstack.org']
|
||||
a3_metadata = {'good': 'value23', 'bad': 'badvalue23'}
|
||||
a1 = _create_aggregate_with_hosts(context=ctxt)
|
||||
a2 = _create_aggregate_with_hosts(context=ctxt, values=values2,
|
||||
_create_aggregate_with_hosts(context=ctxt)
|
||||
_create_aggregate_with_hosts(context=ctxt, values=values2,
|
||||
hosts=a2_hosts, metadata=a2_metadata)
|
||||
a3 = _create_aggregate_with_hosts(context=ctxt, values=values3,
|
||||
hosts=a3_hosts, metadata=a3_metadata)
|
||||
|
@ -447,10 +447,10 @@ class AggregateDBApiTestCase(test.TestCase):
|
|||
a2_metadata = {'good': 'value12', 'bad': 'badvalue12'}
|
||||
a3_hosts = ['foo2.openstack.org', 'foo3.openstack.org']
|
||||
a3_metadata = {'good': 'value23', 'bad': 'badvalue23'}
|
||||
a1 = _create_aggregate_with_hosts(context=ctxt)
|
||||
a2 = _create_aggregate_with_hosts(context=ctxt, values=values2,
|
||||
_create_aggregate_with_hosts(context=ctxt)
|
||||
_create_aggregate_with_hosts(context=ctxt, values=values2,
|
||||
hosts=a2_hosts, metadata=a2_metadata)
|
||||
a3 = _create_aggregate_with_hosts(context=ctxt, values=values3,
|
||||
_create_aggregate_with_hosts(context=ctxt, values=values3,
|
||||
hosts=a3_hosts, metadata=a3_metadata)
|
||||
r1 = db.aggregate_host_get_by_metadata_key(ctxt, key='good')
|
||||
self.assertEqual({
|
||||
|
@ -1114,8 +1114,8 @@ class SecurityGroupRuleTestCase(test.TestCase, ModelsObjectComparatorMixin):
|
|||
self.assertEqual(rules[0]['id'], security_group_rule['id'])
|
||||
|
||||
def test_security_group_rule_destroy(self):
|
||||
security_group1 = self._create_security_group({'name': 'fake1'})
|
||||
security_group2 = self._create_security_group({'name': 'fake2'})
|
||||
self._create_security_group({'name': 'fake1'})
|
||||
self._create_security_group({'name': 'fake2'})
|
||||
security_group_rule1 = self._create_security_group_rule({})
|
||||
security_group_rule2 = self._create_security_group_rule({})
|
||||
db.security_group_rule_destroy(self.ctxt, security_group_rule1['id'])
|
||||
|
@ -1133,7 +1133,7 @@ class SecurityGroupRuleTestCase(test.TestCase, ModelsObjectComparatorMixin):
|
|||
def test_security_group_rule_get(self):
|
||||
security_group_rule1 = (
|
||||
self._create_security_group_rule({}))
|
||||
security_group_rule2 = self._create_security_group_rule({})
|
||||
self._create_security_group_rule({})
|
||||
real_security_group_rule = db.security_group_rule_get(self.ctxt,
|
||||
security_group_rule1['id'])
|
||||
self._assertEqualObjects(security_group_rule1,
|
||||
|
@ -1359,7 +1359,6 @@ class SecurityGroupTestCase(test.TestCase, ModelsObjectComparatorMixin):
|
|||
'project_id': 'fake_proj1',
|
||||
}
|
||||
|
||||
columns_to_join = ['rules.grantee_group']
|
||||
updated_group = db.security_group_update(self.ctxt,
|
||||
security_group['id'],
|
||||
new_values,
|
||||
|
@ -1369,7 +1368,7 @@ class SecurityGroupTestCase(test.TestCase, ModelsObjectComparatorMixin):
|
|||
self.assertEqual(updated_group['rules'], [])
|
||||
|
||||
def test_security_group_update_to_duplicate(self):
|
||||
security_group1 = self._create_security_group(
|
||||
self._create_security_group(
|
||||
{'name': 'fake1', 'project_id': 'fake_proj1'})
|
||||
security_group2 = self._create_security_group(
|
||||
{'name': 'fake1', 'project_id': 'fake_proj2'})
|
||||
|
@ -1722,7 +1721,7 @@ class InstanceTestCase(test.TestCase, ModelsObjectComparatorMixin):
|
|||
def test_instance_get_all_by_filters_deleted_and_soft_deleted(self):
|
||||
inst1 = self.create_instance_with_args()
|
||||
inst2 = self.create_instance_with_args(vm_state=vm_states.SOFT_DELETED)
|
||||
inst3 = self.create_instance_with_args()
|
||||
self.create_instance_with_args()
|
||||
db.instance_destroy(self.ctxt, inst1['uuid'])
|
||||
result = db.instance_get_all_by_filters(self.ctxt,
|
||||
{'deleted': True})
|
||||
|
@ -1733,8 +1732,8 @@ class InstanceTestCase(test.TestCase, ModelsObjectComparatorMixin):
|
|||
|
||||
def test_instance_get_all_by_filters_deleted_no_soft_deleted(self):
|
||||
inst1 = self.create_instance_with_args()
|
||||
inst2 = self.create_instance_with_args(vm_state=vm_states.SOFT_DELETED)
|
||||
inst3 = self.create_instance_with_args()
|
||||
self.create_instance_with_args(vm_state=vm_states.SOFT_DELETED)
|
||||
self.create_instance_with_args()
|
||||
db.instance_destroy(self.ctxt, inst1['uuid'])
|
||||
result = db.instance_get_all_by_filters(self.ctxt,
|
||||
{'deleted': True,
|
||||
|
@ -1755,7 +1754,7 @@ class InstanceTestCase(test.TestCase, ModelsObjectComparatorMixin):
|
|||
|
||||
def test_instance_get_all_by_filters_not_deleted(self):
|
||||
inst1 = self.create_instance_with_args()
|
||||
inst2 = self.create_instance_with_args(vm_state=vm_states.SOFT_DELETED)
|
||||
self.create_instance_with_args(vm_state=vm_states.SOFT_DELETED)
|
||||
inst3 = self.create_instance_with_args()
|
||||
inst4 = self.create_instance_with_args(vm_state=vm_states.ACTIVE)
|
||||
db.instance_destroy(self.ctxt, inst1['uuid'])
|
||||
|
@ -1964,11 +1963,7 @@ class InstanceTestCase(test.TestCase, ModelsObjectComparatorMixin):
|
|||
set_and_check(meta)
|
||||
|
||||
def test_security_group_in_use(self):
|
||||
instance = db.instance_create(self.ctxt, dict(host='foo'))
|
||||
values = [
|
||||
{'instances': [instance]},
|
||||
{'instances': []},
|
||||
]
|
||||
db.instance_create(self.ctxt, dict(host='foo'))
|
||||
|
||||
def test_instance_update_updates_system_metadata(self):
|
||||
# Ensure that system_metadata is updated during instance_update
|
||||
|
@ -4930,7 +4925,7 @@ class NetworkTestCase(test.TestCase, ModelsObjectComparatorMixin):
|
|||
def test_network_delete_safe(self):
|
||||
values = {'host': 'localhost', 'project_id': 'project1'}
|
||||
network = db.network_create_safe(self.ctxt, values)
|
||||
db_network = db.network_get(self.ctxt, network['id'])
|
||||
db.network_get(self.ctxt, network['id'])
|
||||
values = {'network_id': network['id'], 'address': '192.168.1.5'}
|
||||
address1 = db.fixed_ip_create(self.ctxt, values)['address']
|
||||
values = {'network_id': network['id'],
|
||||
|
@ -5012,7 +5007,7 @@ class NetworkTestCase(test.TestCase, ModelsObjectComparatorMixin):
|
|||
# network with fixed ip with host set
|
||||
net2 = db.network_create_safe(self.ctxt, {})
|
||||
db.fixed_ip_create(self.ctxt, {'host': host, 'network_id': net2.id})
|
||||
data = db.network_get_all_by_host(self.ctxt, host)
|
||||
db.network_get_all_by_host(self.ctxt, host)
|
||||
self._assertEqualListsOfObjects([net1, net2],
|
||||
db.network_get_all_by_host(self.ctxt, host))
|
||||
# network with instance with host set
|
||||
|
@ -5311,9 +5306,9 @@ class QuotaTestCase(test.TestCase, ModelsObjectComparatorMixin):
|
|||
network = db.network_create_safe(self.ctxt, {})
|
||||
for i in range(2):
|
||||
address = '192.168.0.%d' % i
|
||||
ip = db.fixed_ip_create(self.ctxt, {'project_id': 'project1',
|
||||
'address': address,
|
||||
'network_id': network['id']})
|
||||
db.fixed_ip_create(self.ctxt, {'project_id': 'project1',
|
||||
'address': address,
|
||||
'network_id': network['id']})
|
||||
db.fixed_ip_associate(self.ctxt, address,
|
||||
instances[0].uuid, network['id'])
|
||||
|
||||
|
@ -6625,7 +6620,7 @@ class InstanceGroupDBApiTestCase(test.TestCase, ModelsObjectComparatorMixin):
|
|||
def test_instance_group_create_with_same_key(self):
|
||||
values = self._get_default_values()
|
||||
values['uuid'] = 'fake_id'
|
||||
result = self._create_instance_group(self.context, values)
|
||||
self._create_instance_group(self.context, values)
|
||||
self.assertRaises(exception.InstanceGroupIdExists,
|
||||
self._create_instance_group, self.context, values)
|
||||
|
||||
|
@ -7030,8 +7025,6 @@ class PciDeviceDBApiTestCase(test.TestCase, ModelsObjectComparatorMixin):
|
|||
def test_pci_device_get_by_id(self):
|
||||
v1, v2 = self._create_fake_pci_devs()
|
||||
result = db.pci_device_get_by_id(self.admin_context, 3353)
|
||||
ignored_keys = ['id', 'deleted', 'deleted_at', 'updated_at',
|
||||
'created_at']
|
||||
self._assertEqualObjects(v1, result, self.ignored_keys)
|
||||
|
||||
def test_pci_device_get_by_id_not_found(self):
|
||||
|
|
|
@ -359,7 +359,6 @@ class TestGlanceImageService(test.NoDBTestCase):
|
|||
def test_download_module_file_bad_module(self):
|
||||
_, data_filename = self._get_tempfile()
|
||||
file_url = 'applesauce://%s' % data_filename
|
||||
data_called = False
|
||||
|
||||
class MyGlanceStubClient(glance_stubs.StubGlanceClient):
|
||||
data_called = False
|
||||
|
@ -841,7 +840,7 @@ class TestDetail(test.NoDBTestCase):
|
|||
client.call.return_value = [mock.sentinel.images_0]
|
||||
ctx = mock.sentinel.ctx
|
||||
service = glance.GlanceImageService(client)
|
||||
images = service.detail(ctx, **params)
|
||||
service.detail(ctx, **params)
|
||||
|
||||
client.call.assert_called_once_with(ctx, 1, 'list', limit=10)
|
||||
|
||||
|
@ -1006,7 +1005,7 @@ class TestDelete(test.NoDBTestCase):
|
|||
client.call.return_value = True
|
||||
ctx = mock.sentinel.ctx
|
||||
service = glance.GlanceImageService(client)
|
||||
result = service.delete(ctx, mock.sentinel.image_id)
|
||||
service.delete(ctx, mock.sentinel.image_id)
|
||||
client.call.assert_called_once_with(ctx, 1, 'delete',
|
||||
mock.sentinel.image_id)
|
||||
|
||||
|
|
|
@ -31,7 +31,6 @@ class ApiSampleTestBaseV3(api_samples_test_base.ApiSampleTestBase):
|
|||
extra_extensions_to_load = None
|
||||
|
||||
def setUp(self):
|
||||
extends = []
|
||||
self.flags(use_ipv6=False,
|
||||
osapi_compute_link_prefix=self._get_host(),
|
||||
osapi_glance_link_prefix=self._get_glance_host())
|
||||
|
|
|
@ -79,8 +79,6 @@ class ExtendedVolumesSampleJsonTests(test_servers.ServersSampleBase):
|
|||
|
||||
def test_attach_volume(self):
|
||||
device_name = '/dev/vdd'
|
||||
disk_bus = 'ide'
|
||||
device_type = 'cdrom'
|
||||
self.stubs.Set(cinder.API, 'get', fakes.stub_volume_get)
|
||||
self.stubs.Set(cinder.API, 'check_attach', lambda *a, **k: None)
|
||||
self.stubs.Set(cinder.API, 'reserve_volume', lambda *a, **k: None)
|
||||
|
|
|
@ -194,7 +194,6 @@ class TestNeutronDriver(test.NoDBTestCase):
|
|||
{'id': '2', 'device_id': 'dev_1', 'security_groups': ['2']}]
|
||||
port_list = {'ports': ports}
|
||||
sg1 = {'id': '1', 'name': 'wol'}
|
||||
sg2 = {'id': '2', 'name': 'eor'}
|
||||
# User doesn't have access to sg2
|
||||
security_groups_list = {'security_groups': [sg1]}
|
||||
|
||||
|
|
|
@ -24,7 +24,7 @@ from nova.virt import netutils
|
|||
class RouteTests(test.NoDBTestCase):
|
||||
def test_create_route_with_attrs(self):
|
||||
route = fake_network_cache_model.new_route()
|
||||
ip = fake_network_cache_model.new_ip(dict(address='192.168.1.1'))
|
||||
fake_network_cache_model.new_ip(dict(address='192.168.1.1'))
|
||||
self.assertEqual(route['cidr'], '0.0.0.0/24')
|
||||
self.assertEqual(route['gateway']['address'], '192.168.1.1')
|
||||
self.assertEqual(route['interface'], 'eth0')
|
||||
|
@ -323,13 +323,8 @@ class NetworkTests(test.NoDBTestCase):
|
|||
self.assertNotEqual(network1, network2)
|
||||
|
||||
def test_hydrate(self):
|
||||
new_network = dict(
|
||||
id=1,
|
||||
bridge='br0',
|
||||
label='public',
|
||||
subnets=[fake_network_cache_model.new_subnet(),
|
||||
fake_network_cache_model.new_subnet(
|
||||
dict(cidr='255.255.255.255'))])
|
||||
fake_network_cache_model.new_subnet()
|
||||
fake_network_cache_model.new_subnet(dict(cidr='255.255.255.255'))
|
||||
network = model.Network.hydrate(fake_network_cache_model.new_network())
|
||||
|
||||
self.assertEqual(network['id'], 1)
|
||||
|
@ -428,10 +423,7 @@ class VIFTests(test.NoDBTestCase):
|
|||
self.assertEqual(labeled_ips, ip_dict)
|
||||
|
||||
def test_hydrate(self):
|
||||
new_vif = dict(
|
||||
id=1,
|
||||
address='127.0.0.1',
|
||||
network=fake_network_cache_model.new_network())
|
||||
fake_network_cache_model.new_network()
|
||||
vif = model.VIF.hydrate(fake_network_cache_model.new_vif())
|
||||
self.assertEqual(vif['id'], 1)
|
||||
self.assertEqual(vif['address'], 'aa:aa:aa:aa:aa:aa')
|
||||
|
|
|
@ -72,7 +72,7 @@ class _TestDNSDomain(object):
|
|||
def test_get_all(self):
|
||||
with mock.patch.object(db, 'dnsdomain_get_all') as get:
|
||||
get.return_value = [fake_dnsd]
|
||||
dns_domains = dns_domain.DNSDomainList.get_all(self.context)
|
||||
dns_domain.DNSDomainList.get_all(self.context)
|
||||
|
||||
|
||||
class TestDNSDomainObject(test_objects._LocalTest,
|
||||
|
|
|
@ -113,8 +113,7 @@ class _TestInstanceGroupObjects(test.TestCase):
|
|||
metadata = {'foo': 'bar'}
|
||||
obj_result.metadetails = metadata
|
||||
obj_result.save()
|
||||
metadata1 = db.instance_group_metadata_get(self.context,
|
||||
db_result['uuid'])
|
||||
db.instance_group_metadata_get(self.context, db_result['uuid'])
|
||||
for key, value in metadata.iteritems():
|
||||
self.assertEqual(value, metadata[key])
|
||||
|
||||
|
|
|
@ -269,7 +269,6 @@ class PciDevTrackerTestCase(test.TestCase):
|
|||
def test_clean_usage(self):
|
||||
inst_2 = copy.copy(self.inst)
|
||||
inst_2.uuid = 'uuid5'
|
||||
inst = {'uuid': 'uuid1', 'vm_state': vm_states.BUILDING}
|
||||
migr = {'instance_uuid': 'uuid2', 'vm_state': vm_states.BUILDING}
|
||||
orph = {'uuid': 'uuid3', 'vm_state': vm_states.BUILDING}
|
||||
|
||||
|
@ -289,7 +288,6 @@ class PciDevTrackerTestCase(test.TestCase):
|
|||
def test_clean_usage_claims(self):
|
||||
inst_2 = copy.copy(self.inst)
|
||||
inst_2.uuid = 'uuid5'
|
||||
inst = {'uuid': 'uuid1', 'vm_state': vm_states.BUILDING}
|
||||
migr = {'instance_uuid': 'uuid2', 'vm_state': vm_states.BUILDING}
|
||||
orph = {'uuid': 'uuid3', 'vm_state': vm_states.BUILDING}
|
||||
|
||||
|
@ -328,7 +326,7 @@ class PciGetInstanceDevs(test.TestCase):
|
|||
self.stubs.Set(db, 'pci_device_get_all_by_instance_uuid',
|
||||
_fake_pci_device_get_by_instance_uuid)
|
||||
self._get_by_uuid = False
|
||||
devices = pci_manager.get_instance_pci_devs(instance)
|
||||
pci_manager.get_instance_pci_devs(instance)
|
||||
self.assertEqual(self._get_by_uuid, True)
|
||||
|
||||
def test_get_devs_object(self):
|
||||
|
@ -348,5 +346,5 @@ class PciGetInstanceDevs(test.TestCase):
|
|||
_fake_obj_load_attr)
|
||||
|
||||
self.load_attr_called = False
|
||||
devices = pci_manager.get_instance_pci_devs(inst)
|
||||
pci_manager.get_instance_pci_devs(inst)
|
||||
self.assertEqual(self.load_attr_called, True)
|
||||
|
|
|
@ -46,7 +46,7 @@ class CachingSchedulerTestCase(test_scheduler.SchedulerTestCase):
|
|||
def test_get_all_host_states_returns_cached_value(self, mock_up_hosts):
|
||||
self.driver.all_host_states = []
|
||||
|
||||
result = self.driver._get_all_host_states(self.context)
|
||||
self.driver._get_all_host_states(self.context)
|
||||
|
||||
self.assertFalse(mock_up_hosts.called)
|
||||
self.assertEqual([], self.driver.all_host_states)
|
||||
|
|
|
@ -166,9 +166,9 @@ class FiltersTestCase(test.NoDBTestCase):
|
|||
|
||||
filter_handler = filters.BaseFilterHandler(filters.BaseFilter)
|
||||
filter_classes = [Filter1, Filter2]
|
||||
result = filter_handler.get_filtered_objects(filter_classes,
|
||||
filter_objs_initial,
|
||||
filter_properties)
|
||||
filter_handler.get_filtered_objects(filter_classes,
|
||||
filter_objs_initial,
|
||||
filter_properties)
|
||||
|
||||
def test_get_filtered_objects_none_response(self):
|
||||
filter_objs_initial = ['initial', 'filter1', 'objects1']
|
||||
|
|
|
@ -124,7 +124,7 @@ class AvailabilityZoneTestCases(test.TestCase):
|
|||
"""Test set availability zone cache key is unicode."""
|
||||
service = self._create_service_with_topic('network', self.host)
|
||||
services = db.service_get_all(self.context)
|
||||
new_service = az.set_availability_zones(self.context, services)[0]
|
||||
az.set_availability_zones(self.context, services)
|
||||
self.assertIsInstance(services[0]['host'], unicode)
|
||||
cached_key = az._make_cache_key(services[0]['host'])
|
||||
self.assertIsInstance(cached_key, str)
|
||||
|
|
|
@ -480,7 +480,7 @@ class CreateInstanceTypeTest(test.TestCase):
|
|||
|
||||
def test_rxtx_factor_must_be_within_sql_float_range(self):
|
||||
_context = context.get_admin_context()
|
||||
inst_types = db.flavor_get_all(_context)
|
||||
db.flavor_get_all(_context)
|
||||
# We do * 10 since this is an approximation and we need to make sure
|
||||
# the difference is noticeble.
|
||||
over_rxtx_factor = flavors.SQL_SP_FLOAT_MAX * 10
|
||||
|
|
|
@ -239,7 +239,7 @@ class MetadataTestCase(test.TestCase):
|
|||
'swap': '/dev/sdc',
|
||||
'ebs0': '/dev/sdh'}
|
||||
|
||||
capi = conductor_api.LocalAPI()
|
||||
conductor_api.LocalAPI()
|
||||
|
||||
self.assertEqual(base._format_instance_mapping(ctxt,
|
||||
instance_ref0), block_device._DEFAULT_MAPPINGS)
|
||||
|
|
|
@ -123,8 +123,6 @@ class QuotaIntegrationTestCase(test.TestCase):
|
|||
# Setting cores quota to unlimited:
|
||||
self.flags(quota_cores=-1)
|
||||
instance = self._create_instance(cores=4)
|
||||
inst_type = flavors.get_flavor_by_name('m1.small')
|
||||
image_uuid = 'cedef40a-ed67-4d10-800e-17455edce175'
|
||||
db.instance_destroy(self.context, instance['uuid'])
|
||||
|
||||
def test_too_many_addresses(self):
|
||||
|
@ -2118,7 +2116,7 @@ class QuotaReserveSqlAlchemyTestCase(test.TestCase):
|
|||
def test_quota_reserve_cores_unlimited(self):
|
||||
# Requesting 8 cores, quota_cores set to unlimited:
|
||||
self.flags(quota_cores=-1)
|
||||
context = self._init_usages(1, 8, 1 * 1024, 1)
|
||||
self._init_usages(1, 8, 1 * 1024, 1)
|
||||
self.assertEqual(self.sync_called, set([]))
|
||||
self.usages_list[0]["in_use"] = 1
|
||||
self.usages_list[0]["reserved"] = 0
|
||||
|
@ -2135,7 +2133,7 @@ class QuotaReserveSqlAlchemyTestCase(test.TestCase):
|
|||
def test_quota_reserve_ram_unlimited(self):
|
||||
# Requesting 10*1024 ram, quota_ram set to unlimited:
|
||||
self.flags(quota_ram=-1)
|
||||
context = self._init_usages(1, 1, 10 * 1024, 1)
|
||||
self._init_usages(1, 1, 10 * 1024, 1)
|
||||
self.assertEqual(self.sync_called, set([]))
|
||||
self.usages_list[0]["in_use"] = 1
|
||||
self.usages_list[0]["reserved"] = 0
|
||||
|
|
|
@ -48,7 +48,6 @@ class TestWeigher(test.NoDBTestCase):
|
|||
((20.0, 50.0), (0.4, 1.0), 0.0, None),
|
||||
((20.0, 50.0), (0.2, 0.5), 0.0, 100.0),
|
||||
)
|
||||
normalize_to = (1.0, 10.0)
|
||||
for seq, result, minval, maxval in map_:
|
||||
ret = weights.normalize(seq, minval=minval, maxval=maxval)
|
||||
self.assertEqual(tuple(ret), result)
|
||||
|
|
|
@ -105,7 +105,7 @@ class TestWSGIServer(test.NoDBTestCase):
|
|||
|
||||
def test_custom_max_header_line(self):
|
||||
CONF.max_header_line = 4096 # Default value is 16384.
|
||||
server = nova.wsgi.Server("test_custom_max_header_line", None)
|
||||
nova.wsgi.Server("test_custom_max_header_line", None)
|
||||
self.assertEqual(CONF.max_header_line, eventlet.wsgi.MAX_HEADER_LINE)
|
||||
|
||||
def test_start_random_port(self):
|
||||
|
|
|
@ -105,7 +105,6 @@ def get_test_network_info(count=1):
|
|||
ipv6 = CONF.use_ipv6
|
||||
fake = 'fake'
|
||||
fake_ip = '0.0.0.0'
|
||||
fake_netmask = '255.255.255.255'
|
||||
fake_vlan = 100
|
||||
fake_bridge_interface = 'eth0'
|
||||
|
||||
|
@ -209,6 +208,5 @@ def is_ipv6_supported():
|
|||
|
||||
|
||||
def get_api_version(request):
|
||||
api_version = 2
|
||||
if request.path[2:3].isdigit():
|
||||
return int(request.path[2:3])
|
||||
|
|
|
@ -503,7 +503,7 @@ class PXEPublicMethodsTestCase(BareMetalPXETestCase):
|
|||
iqn = "iqn-%s" % self.instance['uuid']
|
||||
pxe_config = 'this is a fake pxe config'
|
||||
pxe_path = pxe.get_pxe_config_file_path(self.instance)
|
||||
image_path = pxe.get_image_file_path(self.instance)
|
||||
pxe.get_image_file_path(self.instance)
|
||||
|
||||
self.mox.StubOutWithMock(flavor_obj.Flavor, 'get_by_id')
|
||||
self.mox.StubOutWithMock(pxe, 'get_tftp_image_info')
|
||||
|
|
|
@ -312,11 +312,8 @@ class TileraPublicMethodsTestCase(BareMetalTileraTestCase):
|
|||
'kernel': [None, 'cccc'],
|
||||
}
|
||||
self.instance['uuid'] = 'fake-uuid'
|
||||
iqn = "iqn-%s" % self.instance['uuid']
|
||||
tilera_config = 'this is a fake tilera config'
|
||||
self.instance['uuid'] = 'fake-uuid'
|
||||
tilera_path = tilera.get_tilera_nfs_path(self.instance)
|
||||
image_path = tilera.get_image_file_path(self.instance)
|
||||
tilera.get_tilera_nfs_path(self.instance)
|
||||
tilera.get_image_file_path(self.instance)
|
||||
|
||||
self.mox.StubOutWithMock(tilera, 'get_tftp_image_info')
|
||||
self.mox.StubOutWithMock(tilera, 'get_partition_sizes')
|
||||
|
@ -334,8 +331,8 @@ class TileraPublicMethodsTestCase(BareMetalTileraTestCase):
|
|||
def test_activate_and_deactivate_bootloader(self):
|
||||
self._create_node()
|
||||
self.instance['uuid'] = 'fake-uuid'
|
||||
tilera_path = tilera.get_tilera_nfs_path(self.instance)
|
||||
image_path = tilera.get_image_file_path(self.instance)
|
||||
tilera.get_tilera_nfs_path(self.instance)
|
||||
tilera.get_image_file_path(self.instance)
|
||||
|
||||
self.mox.ReplayAll()
|
||||
|
||||
|
@ -365,7 +362,7 @@ class TileraPublicMethodsTestCase(BareMetalTileraTestCase):
|
|||
self.mox.StubOutWithMock(tilera, 'get_tftp_image_info')
|
||||
self.mox.StubOutWithMock(self.driver, '_collect_mac_addresses')
|
||||
|
||||
tilera_path = tilera.get_tilera_nfs_path(self.node['id'])
|
||||
tilera.get_tilera_nfs_path(self.node['id'])
|
||||
|
||||
tilera.get_tftp_image_info(self.instance).\
|
||||
AndRaise(exception.NovaException)
|
||||
|
|
|
@ -141,7 +141,6 @@ class APITestCase(test.NoDBTestCase):
|
|||
def test_extend_raw_success(self):
|
||||
imgfile = tempfile.NamedTemporaryFile()
|
||||
imgsize = 10
|
||||
device = "/dev/sdh"
|
||||
use_cow = False
|
||||
|
||||
self.mox.StubOutWithMock(api, 'can_resize_image')
|
||||
|
|
|
@ -389,9 +389,9 @@ class HyperVAPITestCase(HyperVAPIBaseTestCase):
|
|||
self._test_spawn_instance(False, vhd_format=constants.DISK_FORMAT_VHDX)
|
||||
|
||||
def _setup_spawn_config_drive_mocks(self, use_cdrom):
|
||||
im = instance_metadata.InstanceMetadata(mox.IgnoreArg(),
|
||||
content=mox.IsA(list),
|
||||
extra_md=mox.IsA(dict))
|
||||
instance_metadata.InstanceMetadata(mox.IgnoreArg(),
|
||||
content=mox.IsA(list),
|
||||
extra_md=mox.IsA(dict))
|
||||
|
||||
m = fake.PathUtils.get_instance_dir(mox.IsA(str))
|
||||
m.AndReturn(self._test_instance_dir)
|
||||
|
@ -661,7 +661,6 @@ class HyperVAPITestCase(HyperVAPIBaseTestCase):
|
|||
dest_server = 'fake_server'
|
||||
|
||||
instance_data = self._get_instance_data()
|
||||
instance_name = instance_data['name']
|
||||
|
||||
fake_post_method = self._mox.CreateMockAnything()
|
||||
if not test_failure and not unsupported_os:
|
||||
|
@ -673,16 +672,9 @@ class HyperVAPITestCase(HyperVAPIBaseTestCase):
|
|||
fake_recover_method(self._context, instance_data, dest_server,
|
||||
False)
|
||||
|
||||
fake_ide_controller_path = 'fakeide'
|
||||
fake_scsi_controller_path = 'fakescsi'
|
||||
|
||||
if with_volumes:
|
||||
fake_scsi_disk_path = 'fake_scsi_disk_path'
|
||||
fake_target_iqn = 'fake_target_iqn'
|
||||
fake_target_lun = 1
|
||||
fake_scsi_paths = {0: fake_scsi_disk_path}
|
||||
else:
|
||||
fake_scsi_paths = {}
|
||||
|
||||
if not unsupported_os:
|
||||
m = livemigrationutils.LiveMigrationUtils.live_migrate_vm(
|
||||
|
@ -1269,7 +1261,6 @@ class HyperVAPITestCase(HyperVAPIBaseTestCase):
|
|||
boot_from_volume=False):
|
||||
fake_mounted_disk = "fake_mounted disk"
|
||||
fake_device_number = 0
|
||||
fake_controller_path = "fake_scsi_controller_path"
|
||||
|
||||
self._mock_login_storage_target(target_iqn, target_lun,
|
||||
target_portal,
|
||||
|
@ -1280,7 +1271,7 @@ class HyperVAPITestCase(HyperVAPIBaseTestCase):
|
|||
fake_mounted_disk,
|
||||
fake_device_number)
|
||||
|
||||
m = volumeutils.VolumeUtils.logout_storage_target(target_iqn)
|
||||
volumeutils.VolumeUtils.logout_storage_target(target_iqn)
|
||||
|
||||
def test_attach_volume_logout(self):
|
||||
instance_data = self._get_instance_data()
|
||||
|
@ -1318,8 +1309,6 @@ class HyperVAPITestCase(HyperVAPIBaseTestCase):
|
|||
None, connection_info, instance_data, mount_point)
|
||||
|
||||
def _mock_detach_volume(self, target_iqn, target_lun):
|
||||
mount_point = '/dev/sdc'
|
||||
|
||||
fake_mounted_disk = "fake_mounted_disk"
|
||||
fake_device_number = 0
|
||||
m = volumeutils.VolumeUtils.get_device_number_for_target(target_iqn,
|
||||
|
@ -1336,14 +1325,14 @@ class HyperVAPITestCase(HyperVAPIBaseTestCase):
|
|||
|
||||
def test_detach_volume(self):
|
||||
instance_data = self._get_instance_data()
|
||||
instance_name = instance_data['name']
|
||||
self.assertIn('name', instance_data)
|
||||
|
||||
connection_info = db_fakes.get_fake_volume_info_data(
|
||||
self._volume_target_portal, self._volume_id)
|
||||
data = connection_info['data']
|
||||
target_lun = data['target_lun']
|
||||
target_iqn = data['target_iqn']
|
||||
target_portal = data['target_portal']
|
||||
self.assertIn('target_portal', data)
|
||||
|
||||
mount_point = '/dev/sdc'
|
||||
|
||||
|
|
|
@ -90,7 +90,7 @@ class VHDUtilsTestCase(test.NoDBTestCase):
|
|||
def test_get_vhd_format_vhdx(self):
|
||||
with mock.patch('nova.virt.hyperv.vhdutils.open',
|
||||
mock.mock_open(read_data=vhdutils.VHDX_SIGNATURE),
|
||||
create=True) as mock_open:
|
||||
create=True):
|
||||
|
||||
format = self._vhdutils.get_vhd_format(self._FAKE_VHD_PATH)
|
||||
|
||||
|
|
|
@ -1026,8 +1026,6 @@ def openAuth(uri, auth, flags):
|
|||
raise Exception(
|
||||
_("Expected a function in 'auth[1]' parameter"))
|
||||
|
||||
connection_used = True
|
||||
|
||||
return Connection(uri, (flags == VIR_CONNECT_RO))
|
||||
|
||||
|
||||
|
|
|
@ -572,7 +572,6 @@ class ImageCacheManagerTestCase(test.NoDBTestCase):
|
|||
self.stubs.Set(virtutils, 'chown', lambda x, y: None)
|
||||
|
||||
# We need to stub utime as well
|
||||
orig_utime = os.utime
|
||||
self.stubs.Set(os, 'utime', lambda x, y: None)
|
||||
|
||||
# Fake up some instances in the instances directory
|
||||
|
|
|
@ -1089,8 +1089,8 @@ class LibvirtConnTestCase(test.TestCase):
|
|||
# This will exercise the failed code path still,
|
||||
# and won't require fakes and stubs of the iscsi discovery
|
||||
block_device_info = {}
|
||||
cfg = conn.get_guest_config(instance_ref, [], None, disk_info,
|
||||
None, block_device_info)
|
||||
conn.get_guest_config(instance_ref, [], None, disk_info,
|
||||
None, block_device_info)
|
||||
instance_ref = db.instance_get(self.context, instance_ref['id'])
|
||||
self.assertEqual(instance_ref['root_device_name'], '/dev/vda')
|
||||
|
||||
|
@ -3578,8 +3578,8 @@ class LibvirtConnTestCase(test.TestCase):
|
|||
rescue=None, expect_xen_hvm=False, xen_only=False):
|
||||
user_context = context.RequestContext(self.user_id, self.project_id)
|
||||
instance_ref = db.instance_create(user_context, instance)
|
||||
network_ref = db.project_get_networks(context.get_admin_context(),
|
||||
self.project_id)[0]
|
||||
db.project_get_networks(context.get_admin_context(),
|
||||
self.project_id)[0]
|
||||
|
||||
xen_vm_mode = vm_mode.XEN
|
||||
if expect_xen_hvm:
|
||||
|
@ -3886,7 +3886,7 @@ class LibvirtConnTestCase(test.TestCase):
|
|||
compute_info, compute_info, False)
|
||||
|
||||
def test_check_can_live_migrate_dest_cleanup_works_correctly(self):
|
||||
instance_ref = db.instance_create(self.context, self.test_instance)
|
||||
db.instance_create(self.context, self.test_instance)
|
||||
dest_check_data = {"filename": "file",
|
||||
"block_migration": True,
|
||||
"disk_over_commit": False,
|
||||
|
@ -4741,10 +4741,8 @@ class LibvirtConnTestCase(test.TestCase):
|
|||
instance,
|
||||
None,
|
||||
image_meta)
|
||||
conn._create_image(context, instance,
|
||||
disk_info['mapping'])
|
||||
xml = conn.to_xml(self.context, instance, None,
|
||||
disk_info, image_meta)
|
||||
conn._create_image(context, instance, disk_info['mapping'])
|
||||
conn.to_xml(self.context, instance, None, disk_info, image_meta)
|
||||
|
||||
wantFiles = [
|
||||
{'filename': '356a192b7913b04c54574d18c28d46e6395428ab',
|
||||
|
@ -4823,10 +4821,8 @@ class LibvirtConnTestCase(test.TestCase):
|
|||
instance,
|
||||
None,
|
||||
image_meta)
|
||||
conn._create_image(context, instance,
|
||||
disk_info['mapping'])
|
||||
xml = conn.to_xml(self.context, instance, None,
|
||||
disk_info, image_meta)
|
||||
conn._create_image(context, instance, disk_info['mapping'])
|
||||
conn.to_xml(self.context, instance, None, disk_info, image_meta)
|
||||
|
||||
wantFiles = [
|
||||
{'filename': '356a192b7913b04c54574d18c28d46e6395428ab',
|
||||
|
@ -5453,7 +5449,6 @@ class LibvirtConnTestCase(test.TestCase):
|
|||
|
||||
def _check_xml_bus(name, xml, block_info):
|
||||
tree = etree.fromstring(xml)
|
||||
got_disks = tree.findall('./devices/disk')
|
||||
got_disk_targets = tree.findall('./devices/disk/target')
|
||||
system_meta = utils.instance_sys_meta(instance)
|
||||
image_meta = utils.get_image_from_system_metadata(system_meta)
|
||||
|
@ -7639,11 +7634,11 @@ class IptablesFirewallTestCase(test.TestCase):
|
|||
|
||||
admin_ctxt = context.get_admin_context()
|
||||
# add a rule and send the update message, check for 1 rule
|
||||
provider_fw0 = db.provider_fw_rule_create(admin_ctxt,
|
||||
{'protocol': 'tcp',
|
||||
'cidr': '10.99.99.99/32',
|
||||
'from_port': 1,
|
||||
'to_port': 65535})
|
||||
db.provider_fw_rule_create(admin_ctxt,
|
||||
{'protocol': 'tcp',
|
||||
'cidr': '10.99.99.99/32',
|
||||
'from_port': 1,
|
||||
'to_port': 65535})
|
||||
self.fw.refresh_provider_fw_rules()
|
||||
rules = [rule for rule in self.fw.iptables.ipv4['filter'].rules
|
||||
if rule.chain == 'provider']
|
||||
|
@ -7709,9 +7704,7 @@ class NWFilterTestCase(test.TestCase):
|
|||
ip_protocol='tcp',
|
||||
cidr_ip='0.0.0.0/0')
|
||||
|
||||
security_group = db.security_group_get_by_name(self.context,
|
||||
'fake',
|
||||
'testgroup')
|
||||
db.security_group_get_by_name(self.context, 'fake', 'testgroup')
|
||||
self.teardown_security_group()
|
||||
|
||||
def teardown_security_group(self):
|
||||
|
@ -9238,7 +9231,7 @@ class LibvirtVolumeSnapshotTestCase(test.TestCase):
|
|||
instance = db.instance_create(self.c, self.inst)
|
||||
snapshot_id = 'snapshot-1234'
|
||||
|
||||
domain = FakeVirtDomain(fake_xml=self.dom_xml)
|
||||
FakeVirtDomain(fake_xml=self.dom_xml)
|
||||
|
||||
self.mox.StubOutWithMock(self.conn, '_lookup_by_name')
|
||||
self.mox.StubOutWithMock(self.conn, '_volume_api')
|
||||
|
@ -9269,7 +9262,7 @@ class LibvirtVolumeSnapshotTestCase(test.TestCase):
|
|||
instance = db.instance_create(self.c, self.inst)
|
||||
snapshot_id = '1234-9876'
|
||||
|
||||
domain = FakeVirtDomain(fake_xml=self.dom_xml)
|
||||
FakeVirtDomain(fake_xml=self.dom_xml)
|
||||
|
||||
self.mox.StubOutWithMock(self.conn, '_lookup_by_name')
|
||||
self.mox.StubOutWithMock(self.conn, '_volume_api')
|
||||
|
@ -9300,7 +9293,7 @@ class LibvirtVolumeSnapshotTestCase(test.TestCase):
|
|||
def test_volume_snapshot_delete_invalid_type(self):
|
||||
instance = db.instance_create(self.c, self.inst)
|
||||
|
||||
domain = FakeVirtDomain(fake_xml=self.dom_xml)
|
||||
FakeVirtDomain(fake_xml=self.dom_xml)
|
||||
|
||||
self.mox.StubOutWithMock(self.conn, '_lookup_by_name')
|
||||
self.mox.StubOutWithMock(self.conn, '_volume_api')
|
||||
|
|
|
@ -178,7 +178,6 @@ blah BLAH: bb
|
|||
user = 'user'
|
||||
self.flags(images_rbd_ceph_conf=conf, group='libvirt')
|
||||
self.flags(rbd_user=user, group='libvirt')
|
||||
fn = self.mox.CreateMockAnything()
|
||||
self.mox.StubOutWithMock(libvirt_utils.utils,
|
||||
'execute')
|
||||
libvirt_utils.utils.execute('rbd', '-p', pool, 'ls', '--id',
|
||||
|
@ -197,7 +196,6 @@ blah BLAH: bb
|
|||
names = ['volume1', 'volume2', 'volume3']
|
||||
self.flags(images_rbd_ceph_conf=conf, group='libvirt')
|
||||
self.flags(rbd_user=user, group='libvirt')
|
||||
fn = self.mox.CreateMockAnything()
|
||||
self.mox.StubOutWithMock(libvirt_utils.utils, 'execute')
|
||||
libvirt_utils.utils.execute('rbd', '-p', pool, 'rm', 'volume1',
|
||||
'--id', user, '--conf', conf, attempts=3,
|
||||
|
|
|
@ -279,7 +279,6 @@ class LibvirtVolumeTestCase(test.NoDBTestCase):
|
|||
# NOTE(vish) exists is to make driver assume connecting worked
|
||||
self.stubs.Set(os.path, 'exists', lambda x: True)
|
||||
libvirt_driver = volume.LibvirtISCSIVolumeDriver(self.fake_conn)
|
||||
name = 'volume-00000001'
|
||||
devs = ['/dev/disk/by-path/ip-%s-iscsi-%s-lun-2' % (self.location,
|
||||
self.iqn)]
|
||||
self.stubs.Set(self.fake_conn, 'get_all_block_devices', lambda: devs)
|
||||
|
@ -327,7 +326,6 @@ class LibvirtVolumeTestCase(test.NoDBTestCase):
|
|||
mock_run_multipath, mock_device_name, mock_get_portals,
|
||||
mock_get_iqn):
|
||||
mock_run_multipath.side_effect = processutils.ProcessExecutionError
|
||||
name = 'volume-00000001'
|
||||
vol = {'id': 1, 'name': self.name}
|
||||
connection_info = self.iscsi_connection(vol, self.location,
|
||||
self.iqn)
|
||||
|
@ -520,7 +518,6 @@ class LibvirtVolumeTestCase(test.NoDBTestCase):
|
|||
self.iqn)
|
||||
mpdev_filepath = '/dev/mapper/foo'
|
||||
connection_info['data']['device_path'] = mpdev_filepath
|
||||
target_portals = ['fake_portal1', 'fake_portal2']
|
||||
libvirt_driver._get_multipath_device_name = lambda x: mpdev_filepath
|
||||
self.stubs.Set(libvirt_driver,
|
||||
'_get_target_portals_from_iscsiadm_output',
|
||||
|
@ -596,7 +593,6 @@ class LibvirtVolumeTestCase(test.NoDBTestCase):
|
|||
connection_info = self.iscsi_connection(self.vol, self.location,
|
||||
self.iqn)
|
||||
mpdev_filepath = '/dev/mapper/foo'
|
||||
target_portals = ['fake_portal1', 'fake_portal2']
|
||||
libvirt_driver._get_multipath_device_name = lambda x: mpdev_filepath
|
||||
self.stubs.Set(libvirt_driver,
|
||||
'_get_target_portals_from_iscsiadm_output',
|
||||
|
@ -625,7 +621,6 @@ class LibvirtVolumeTestCase(test.NoDBTestCase):
|
|||
"dev": "vde",
|
||||
"type": "disk",
|
||||
}
|
||||
target_portals = ['fake_portal1', 'fake_portal2']
|
||||
libvirt_driver._get_multipath_device_name = lambda x: mpdev_filepath
|
||||
self.stubs.Set(libvirt_driver,
|
||||
'_get_target_portals_from_iscsiadm_output',
|
||||
|
@ -646,7 +641,6 @@ class LibvirtVolumeTestCase(test.NoDBTestCase):
|
|||
name0 = 'volume-00000000'
|
||||
location0 = '10.0.2.15:3260'
|
||||
iqn0 = 'iqn.2010-10.org.iser.openstack:%s' % name0
|
||||
vol0 = {'id': 0, 'name': name0}
|
||||
dev0 = '/dev/disk/by-path/ip-%s-iscsi-%s-lun-0' % (location0, iqn0)
|
||||
name = 'volume-00000001'
|
||||
location = '10.0.2.15:3260'
|
||||
|
@ -663,7 +657,6 @@ class LibvirtVolumeTestCase(test.NoDBTestCase):
|
|||
"dev": "vde",
|
||||
"type": "disk",
|
||||
}
|
||||
target_portals = ['fake_portal1', 'fake_portal2']
|
||||
libvirt_driver._get_multipath_device_name = lambda x: mpdev_filepath
|
||||
self.stubs.Set(libvirt_driver,
|
||||
'_get_target_portals_from_iscsiadm_output',
|
||||
|
@ -939,7 +932,6 @@ class LibvirtVolumeTestCase(test.NoDBTestCase):
|
|||
conf = libvirt_driver.connect_volume(connection_info,
|
||||
self.disk_info)
|
||||
tree = conf.format_dom()
|
||||
dev_str = '/dev/disk/by-path/pci-0000:05:00.2-fc-0x%s-lun-1' % wwn
|
||||
self.assertEqual(tree.get('type'), 'block')
|
||||
self.assertEqual(tree.find('./source').get('dev'),
|
||||
multipath_devname)
|
||||
|
|
|
@ -614,8 +614,6 @@ class VMwareAPIVMTestCase(test.NoDBTestCase):
|
|||
|
||||
self._create_vm()
|
||||
inst_file_path = '[%s] %s/%s.vmdk' % (self.ds, self.uuid, self.uuid)
|
||||
cache = ('[%s] vmware_base/%s/%s.vmdk' %
|
||||
(self.ds, self.fake_image_uuid, self.fake_image_uuid))
|
||||
self.assertTrue(vmwareapi_fake.get_file(inst_file_path))
|
||||
self._cached_files_exist()
|
||||
|
||||
|
@ -1603,7 +1601,7 @@ class VMwareAPIVMTestCase(test.NoDBTestCase):
|
|||
|
||||
def test_get_vnc_console_noport(self):
|
||||
self._create_vm()
|
||||
fake_vm = vmwareapi_fake._get_objects("VirtualMachine").objects[0]
|
||||
vmwareapi_fake._get_objects("VirtualMachine").objects
|
||||
self.assertRaises(exception.ConsoleTypeUnavailable,
|
||||
self.conn.get_vnc_console,
|
||||
self.context,
|
||||
|
|
|
@ -34,7 +34,6 @@ class fake_session(object):
|
|||
def _wait_for_task(self, task_ref):
|
||||
task_info = self._call_method('module', "get_dynamic_property",
|
||||
task_ref, "Task", "info")
|
||||
task_name = task_info.name
|
||||
if task_info.state == 'success':
|
||||
return task_info
|
||||
else:
|
||||
|
|
|
@ -84,7 +84,7 @@ class ImageCacheManagerTestCase(test.NoDBTestCase):
|
|||
with contextlib.nested(
|
||||
mock.patch.object(ds_util, 'get_sub_folders',
|
||||
fake_get_sub_folders)
|
||||
) as (_get_sub_folders):
|
||||
):
|
||||
self.exists = True
|
||||
ts = self._imagecache._get_timestamp('fake-ds-browser',
|
||||
'fake-ds-path')
|
||||
|
@ -113,7 +113,7 @@ class ImageCacheManagerTestCase(test.NoDBTestCase):
|
|||
with contextlib.nested(
|
||||
mock.patch.object(vim_util, 'get_dynamic_property',
|
||||
fake_get_dynamic_property)
|
||||
) as _get_dynamic:
|
||||
):
|
||||
self.fake_called = 0
|
||||
self.assertEqual({}, self._imagecache._ds_browser)
|
||||
browser = self._imagecache._get_ds_browser('fake-ds-ref')
|
||||
|
|
|
@ -104,7 +104,6 @@ class VBDTestCase(stubs.XenAPITestBaseNoDB):
|
|||
|
||||
@mock.patch.object(utils, 'synchronized')
|
||||
def test_vbd_plug_check_synchronized(self, mock_synchronized):
|
||||
session = mock.Mock()
|
||||
self.session.VBD.plug("vbd_ref", "vm_ref")
|
||||
mock_synchronized.assert_called_once_with("xenapi-vbd-vm_ref")
|
||||
|
||||
|
@ -115,6 +114,5 @@ class VBDTestCase(stubs.XenAPITestBaseNoDB):
|
|||
|
||||
@mock.patch.object(utils, 'synchronized')
|
||||
def test_vbd_plug_check_synchronized(self, mock_synchronized):
|
||||
session = mock.Mock()
|
||||
self.session.VBD.unplug("vbd_ref", "vm_ref")
|
||||
mock_synchronized.assert_called_once_with("xenapi-vbd-vm_ref")
|
||||
|
|
|
@ -77,8 +77,8 @@ class TestBittorrentStore(stubs.XenAPITestBaseNoDB):
|
|||
'bittorrent', 'download_vhd', **params)
|
||||
self.mox.ReplayAll()
|
||||
|
||||
vdis = self.store.download_image(
|
||||
self.context, self.session, 'fake_image_uuid')
|
||||
self.store.download_image(self.context, self.session,
|
||||
'fake_image_uuid')
|
||||
|
||||
self.mox.VerifyAll()
|
||||
|
||||
|
|
|
@ -85,8 +85,8 @@ class TestGlanceStore(stubs.XenAPITestBaseNoDB):
|
|||
self.session.call_plugin_serialized('glance', 'download_vhd', **params)
|
||||
self.mox.ReplayAll()
|
||||
|
||||
vdis = self.store.download_image(
|
||||
self.context, self.session, 'fake_image_uuid')
|
||||
self.store.download_image(self.context, self.session,
|
||||
'fake_image_uuid')
|
||||
|
||||
self.mox.VerifyAll()
|
||||
|
||||
|
@ -126,8 +126,8 @@ class TestGlanceStore(stubs.XenAPITestBaseNoDB):
|
|||
error = self.session.XenAPI.Failure(details=error_details)
|
||||
mock_call_plugin_serialized.side_effect = [error, "success"]
|
||||
|
||||
vdis = self.store.download_image(
|
||||
self.context, self.session, 'fake_image_uuid')
|
||||
self.store.download_image(self.context, self.session,
|
||||
'fake_image_uuid')
|
||||
|
||||
mock_call_plugin_serialized.assert_has_calls(calls)
|
||||
mock_log_debug.assert_has_calls(log_calls, any_order=True)
|
||||
|
|
|
@ -55,7 +55,7 @@ class GlanceImageTestCase(test.NoDBTestCase):
|
|||
self.assertEqual('metadata', image.meta)
|
||||
|
||||
def test_meta_caching(self):
|
||||
image_service = self._stub_out_glance_services()
|
||||
self._stub_out_glance_services()
|
||||
self.mox.ReplayAll()
|
||||
|
||||
image = self._get_image()
|
||||
|
|
|
@ -49,7 +49,6 @@ class TestUploadToGlanceAsRawTgz(test.NoDBTestCase):
|
|||
def test_upload_image(self):
|
||||
store = vdi_through_dev.UploadToGlanceAsRawTgz(
|
||||
'context', 'session', 'instance', ['vdi0', 'vdi1'], 'id')
|
||||
image_service = self.mox.CreateMock(glance.GlanceImageService)
|
||||
self.mox.StubOutWithMock(store, '_perform_upload')
|
||||
self.mox.StubOutWithMock(store, '_get_vdi_ref')
|
||||
self.mox.StubOutWithMock(vdi_through_dev, 'glance')
|
||||
|
@ -119,7 +118,7 @@ class TestUploadToGlanceAsRawTgz(test.NoDBTestCase):
|
|||
|
||||
self.mox.ReplayAll()
|
||||
|
||||
result = store._get_virtual_size()
|
||||
store._get_virtual_size()
|
||||
|
||||
def test__create_pipe(self):
|
||||
store = vdi_through_dev.UploadToGlanceAsRawTgz(
|
||||
|
|
|
@ -1540,7 +1540,6 @@ class ScanSrTestCase(VMUtilsTestBase):
|
|||
details = ['SR_BACKEND_FAILURE_40', "", "", ""]
|
||||
|
||||
session.XenAPI.Failure = FakeException
|
||||
sr_scan_call_count = 0
|
||||
|
||||
def fake_call_xenapi(*args):
|
||||
fake_call_xenapi.count += 1
|
||||
|
|
|
@ -195,7 +195,6 @@ class VolumeAttachTestCase(test.NoDBTestCase):
|
|||
sr_ref = 'sr_ref'
|
||||
vdi_uuid = '2'
|
||||
vdi_ref = 'vdi_ref'
|
||||
vbd_ref = 'vbd_ref'
|
||||
connection_data = {'vdi_uuid': vdi_uuid}
|
||||
connection_info = {'data': connection_data,
|
||||
'driver_volume_type': 'iscsi'}
|
||||
|
|
|
@ -238,8 +238,7 @@ class XenAPIVolumeTestCase(stubs.XenAPITestBaseNoDB):
|
|||
conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
|
||||
vm = xenapi_fake.create_vm(self.instance['name'], 'Running')
|
||||
conn_info = self._make_connection_info()
|
||||
result = conn.attach_volume(
|
||||
None, conn_info, self.instance, '/dev/sdc')
|
||||
conn.attach_volume(None, conn_info, self.instance, '/dev/sdc')
|
||||
|
||||
# check that the VM has a VBD attached to it
|
||||
# Get XenAPI record for VBD
|
||||
|
@ -318,7 +317,7 @@ class XenAPIVMTestCase(stubs.XenAPITestBase):
|
|||
# Instance VDI
|
||||
vdi1 = xenapi_fake.create_vdi('instance-aaaa', None,
|
||||
other_config={'nova_instance_uuid': 'aaaa'})
|
||||
vbd1 = xenapi_fake.create_vbd(vm, vdi1)
|
||||
xenapi_fake.create_vbd(vm, vdi1)
|
||||
# Only looks like instance VDI
|
||||
vdi2 = xenapi_fake.create_vdi('instance-bbbb', None)
|
||||
vbd2 = xenapi_fake.create_vbd(vm, vdi2)
|
||||
|
@ -407,7 +406,6 @@ class XenAPIVMTestCase(stubs.XenAPITestBase):
|
|||
|
||||
def test_get_vnc_console_for_rescue(self):
|
||||
instance = self._create_instance(obj=True)
|
||||
session = get_session()
|
||||
conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
|
||||
rescue_vm = xenapi_fake.create_vm(instance['name'] + '-rescue',
|
||||
'Running')
|
||||
|
@ -967,7 +965,7 @@ class XenAPIVMTestCase(stubs.XenAPITestBase):
|
|||
# Instance id = 2 will use vlan network (see db/fakes.py)
|
||||
ctxt = self.context.elevated()
|
||||
self.network.conductor_api = conductor_api.LocalAPI()
|
||||
instance = self._create_instance(2, False)
|
||||
self._create_instance(2, False)
|
||||
networks = self.network.db.network_get_all(ctxt)
|
||||
with mock.patch('nova.objects.network.Network._from_db_object'):
|
||||
for network in networks:
|
||||
|
@ -1230,8 +1228,7 @@ class XenAPIVMTestCase(stubs.XenAPITestBase):
|
|||
conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
|
||||
# Unrescue expects the original instance to be powered off
|
||||
conn.power_off(instance)
|
||||
rescue_vm = xenapi_fake.create_vm(instance['name'] + '-rescue',
|
||||
'Running')
|
||||
xenapi_fake.create_vm(instance['name'] + '-rescue', 'Running')
|
||||
conn.unrescue(instance, None)
|
||||
|
||||
def test_unrescue_not_in_rescue(self):
|
||||
|
@ -1779,7 +1776,6 @@ class XenAPIMigrateInstance(stubs.XenAPITestBase):
|
|||
def test_migrate_rollback_when_resize_down_fs_fails(self):
|
||||
conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
|
||||
vmops = conn._vmops
|
||||
virtapi = vmops._virtapi
|
||||
|
||||
self.mox.StubOutWithMock(vmops, '_resize_ensure_vm_is_shutdown')
|
||||
self.mox.StubOutWithMock(vmops, '_apply_orig_vm_name_label')
|
||||
|
@ -1983,7 +1979,7 @@ class XenAPIHostTestCase(stubs.XenAPITestBase):
|
|||
def test_host_state_vcpus_used(self):
|
||||
stats = self.conn.get_host_stats(True)
|
||||
self.assertEqual(stats['vcpus_used'], 0)
|
||||
vm = xenapi_fake.create_vm(self.instance['name'], 'Running')
|
||||
xenapi_fake.create_vm(self.instance['name'], 'Running')
|
||||
stats = self.conn.get_host_stats(True)
|
||||
self.assertEqual(stats['vcpus_used'], 4)
|
||||
|
||||
|
@ -2032,15 +2028,13 @@ class XenAPIHostTestCase(stubs.XenAPITestBase):
|
|||
False, 'off_maintenance')
|
||||
|
||||
def test_set_enable_host_enable(self):
|
||||
values = _create_service_entries(self.context, values={'nova':
|
||||
['host']})
|
||||
_create_service_entries(self.context, values={'nova': ['host']})
|
||||
self._test_host_action(self.conn.set_host_enabled, True, 'enabled')
|
||||
service = db.service_get_by_args(self.context, 'host', 'nova-compute')
|
||||
self.assertEqual(service.disabled, False)
|
||||
|
||||
def test_set_enable_host_disable(self):
|
||||
values = _create_service_entries(self.context, values={'nova':
|
||||
['host']})
|
||||
_create_service_entries(self.context, values={'nova': ['host']})
|
||||
self._test_host_action(self.conn.set_host_enabled, False, 'disabled')
|
||||
service = db.service_get_by_args(self.context, 'host', 'nova-compute')
|
||||
self.assertEqual(service.disabled, True)
|
||||
|
@ -2167,7 +2161,7 @@ class XenAPIAutoDiskConfigTestCase(stubs.XenAPITestBase):
|
|||
self.stubs.Set(vm_utils, "_resize_part_and_fs",
|
||||
fake_resize_part_and_fs)
|
||||
|
||||
ctx = context.RequestContext(self.user_id, self.project_id)
|
||||
context.RequestContext(self.user_id, self.project_id)
|
||||
session = get_session()
|
||||
|
||||
disk_image_type = vm_utils.ImageType.DISK_VHD
|
||||
|
@ -2283,7 +2277,7 @@ class XenAPIGenerateLocal(stubs.XenAPITestBase):
|
|||
|
||||
def assertCalled(self, instance,
|
||||
disk_image_type=vm_utils.ImageType.DISK_VHD):
|
||||
ctx = context.RequestContext(self.user_id, self.project_id)
|
||||
context.RequestContext(self.user_id, self.project_id)
|
||||
session = get_session()
|
||||
|
||||
vm_ref = xenapi_fake.create_vm(instance['name'], 'Halted')
|
||||
|
@ -2714,11 +2708,11 @@ class XenAPIDom0IptablesFirewallTestCase(stubs.XenAPITestBase):
|
|||
|
||||
admin_ctxt = context.get_admin_context()
|
||||
# add a rule and send the update message, check for 1 rule
|
||||
provider_fw0 = db.provider_fw_rule_create(admin_ctxt,
|
||||
{'protocol': 'tcp',
|
||||
'cidr': '10.99.99.99/32',
|
||||
'from_port': 1,
|
||||
'to_port': 65535})
|
||||
db.provider_fw_rule_create(admin_ctxt,
|
||||
{'protocol': 'tcp',
|
||||
'cidr': '10.99.99.99/32',
|
||||
'from_port': 1,
|
||||
'to_port': 65535})
|
||||
self.fw.refresh_provider_fw_rules()
|
||||
rules = [rule for rule in self.fw.iptables.ipv4['filter'].rules
|
||||
if rule.chain == 'provider']
|
||||
|
@ -3057,7 +3051,6 @@ class XenAPIAggregateTestCase(stubs.XenAPITestBase):
|
|||
aggr = self.api.add_host_to_aggregate(self.context,
|
||||
aggr['id'], host)
|
||||
# let's mock the fact that the aggregate is in error!
|
||||
status = {'operational_state': pool_states.ERROR}
|
||||
expected = self.api.remove_host_from_aggregate(self.context,
|
||||
aggr['id'],
|
||||
values[fake_zone][0])
|
||||
|
@ -3403,12 +3396,6 @@ class XenAPILiveMigrateTestCase(stubs.XenAPITestBaseNoDB):
|
|||
self.stubs.Set(self.conn._vmops, "_make_plugin_call",
|
||||
fake_make_plugin_call)
|
||||
|
||||
dest_check_data = {'block_migration': True,
|
||||
'migrate_data': {
|
||||
'destination_sr_ref': None,
|
||||
'migrate_send_data': None
|
||||
}}
|
||||
|
||||
self.assertRaises(exception.MigrationError,
|
||||
self.conn.check_can_live_migrate_source,
|
||||
self.context, {'host': 'host'},
|
||||
|
|
|
@ -15,7 +15,6 @@
|
|||
#
|
||||
# iBoot Power Driver
|
||||
|
||||
from nova import context as nova_context
|
||||
from nova import exception
|
||||
from nova.openstack.common.gettextutils import _
|
||||
from nova.openstack.common import importutils
|
||||
|
@ -68,7 +67,6 @@ class IBootManager(base.PowerManager):
|
|||
self.password = str(node['pm_password'])
|
||||
instance = kwargs.pop('instance', {})
|
||||
self.node_name = instance.get('hostname', "")
|
||||
context = nova_context.get_admin_context()
|
||||
self.state = None
|
||||
self.conn = None
|
||||
|
||||
|
|
|
@ -240,7 +240,7 @@ class Tilera(base.NodeDriver):
|
|||
kernel
|
||||
./fs_node_id/
|
||||
"""
|
||||
image_info = get_tftp_image_info(instance)
|
||||
get_tftp_image_info(instance)
|
||||
(root_mb, swap_mb) = get_partition_sizes(instance)
|
||||
tilera_nfs_path = get_tilera_nfs_path(node['id'])
|
||||
image_file_path = get_image_file_path(instance)
|
||||
|
@ -286,7 +286,7 @@ class Tilera(base.NodeDriver):
|
|||
bm_utils.unlink_without_raise(path)
|
||||
|
||||
try:
|
||||
macs = self._collect_mac_addresses(context, node)
|
||||
self._collect_mac_addresses(context, node)
|
||||
except db_exc.DBError:
|
||||
pass
|
||||
|
||||
|
|
|
@ -237,7 +237,6 @@ class NWFilterFirewall(base_firewall.FirewallDriver):
|
|||
|
||||
def unfilter_instance(self, instance, network_info):
|
||||
"""Clear out the nwfilter rules."""
|
||||
instance_name = instance['name']
|
||||
for vif in network_info:
|
||||
nic_id = vif['address'].replace(':', '')
|
||||
instance_filter_name = self._instance_filter_name(instance, nic_id)
|
||||
|
|
|
@ -427,7 +427,6 @@ class LibvirtGenericVIFDriver(LibvirtBaseVIFDriver):
|
|||
super(LibvirtGenericVIFDriver,
|
||||
self).plug(instance, vif)
|
||||
|
||||
network = vif['network']
|
||||
iface_id = self.get_ovs_interfaceid(vif)
|
||||
dev = self.get_vif_devname(vif)
|
||||
linux_net.create_tap_dev(dev)
|
||||
|
|
|
@ -131,7 +131,6 @@ def get_sub_folders(session, ds_browser, ds_path):
|
|||
|
||||
If the path does not exist then an empty set is returned.
|
||||
"""
|
||||
client_factory = session._get_vim().client.factory
|
||||
search_task = session._call_method(
|
||||
session._get_vim(),
|
||||
"SearchDatastore_Task",
|
||||
|
|
|
@ -1168,8 +1168,6 @@ class FakeVim(object):
|
|||
source_vmref = args[0]
|
||||
source_vm_mdo = _get_vm_mdo(source_vmref)
|
||||
clone_spec = kwargs.get("spec")
|
||||
ds = _db_content["Datastore"].keys()[0]
|
||||
host = _db_content["HostSystem"].keys()[0]
|
||||
vm_dict = {
|
||||
"name": kwargs.get("name"),
|
||||
"ds": source_vm_mdo.get("datastore"),
|
||||
|
|
|
@ -195,7 +195,6 @@ class VMwareVMOps(object):
|
|||
(file_type, is_iso) = self._get_disk_format(image_meta)
|
||||
|
||||
client_factory = self._session._get_vim().client.factory
|
||||
service_content = self._session._get_vim().get_service_content()
|
||||
ds = vm_util.get_datastore_ref_and_name(self._session, self._cluster,
|
||||
datastore_regex=self._datastore_regex)
|
||||
data_store_ref = ds[0]
|
||||
|
@ -563,7 +562,6 @@ class VMwareVMOps(object):
|
|||
datastore, file_path):
|
||||
"""Attach cdrom to VM by reconfiguration."""
|
||||
instance_name = instance['name']
|
||||
instance_uuid = instance['uuid']
|
||||
client_factory = self._session._get_vim().client.factory
|
||||
devices = self._session._call_method(vim_util,
|
||||
"get_dynamic_property", vm_ref,
|
||||
|
|
|
@ -1490,7 +1490,7 @@ def _fetch_vhd_image(context, session, instance, image_id):
|
|||
|
||||
try:
|
||||
vdis = handler.download_image(context, session, image_id)
|
||||
except Exception as e:
|
||||
except Exception:
|
||||
default_handler = _default_download_handler()
|
||||
|
||||
# Using type() instead of isinstance() so instance of subclass doesn't
|
||||
|
@ -2128,7 +2128,7 @@ def _wait_for_vhd_coalesce(session, instance, sr_ref, vdi_ref,
|
|||
instance=instance)
|
||||
else:
|
||||
parent_ref = session.call_xenapi("VDI.get_by_uuid", parent_uuid)
|
||||
base_uuid = _get_vhd_parent_uuid(session, parent_ref)
|
||||
_get_vhd_parent_uuid(session, parent_ref)
|
||||
return
|
||||
|
||||
greenthread.sleep(CONF.xenserver.vhd_coalesce_poll_interval)
|
||||
|
|
2
tox.ini
2
tox.ini
|
@ -55,7 +55,7 @@ sitepackages = False
|
|||
# E125 is deliberately excluded. See https://github.com/jcrocholl/pep8/issues/126
|
||||
# The rest of the ignores are TODOs
|
||||
|
||||
ignore = E121,E122,E123,E124,E125,E126,E127,E128,E711,E712,F811,F841,H803
|
||||
ignore = E121,E122,E123,E124,E125,E126,E127,E128,E711,E712,F811,H803
|
||||
exclude = .venv,.git,.tox,dist,doc,*openstack/common*,*lib/python*,*egg,build,tools
|
||||
|
||||
[hacking]
|
||||
|
|
Loading…
Reference in New Issue