Fix flake8 checking issue in powervc-driver and Unittest

There are some flake8 issues in each component that are not compliant
 with the coding style rules.
There are also some unittest issues in some components that need to
 be fixed.

Change-Id: Ic9a3f2c3b779ced225a42f69a495a606cb62517e
Closes-Bug: #1350160
This commit is contained in:
Eric_Zhao 2014-07-30 04:20:42 -04:00
parent 001d845aa1
commit 5de8b9d735
25 changed files with 479 additions and 298 deletions

View File

@ -153,20 +153,20 @@ class PowerVCDriverTestCase(unittest.TestCase):
dic, "return vol doesn't match")
def test_delete_volume_success(self):
#fake volume which will be passed to driver service
# fake volume which will be passed to driver service
vol_info = {'id': 1234,
'size': 1}
volume = Volume(vol_info)
setattr(volume, 'volume_metadata', [VolumeMetadataWithPVCID("1234")])
#fake existed volume
# fake existed volume
existed_vol_info = {"status": 'available', 'id': 1234}
existed_volume_get = Volume(existed_vol_info)
#fake volume after delete
# fake volume after delete
after_delete_vol_info = {"status": '', 'id': 1234}
after_delete_volume_get = Volume(after_delete_vol_info)
#mock rest API
# mock rest API
PowerVCService._client.volumes.get = \
mock.MagicMock(side_effect=[existed_volume_get,
after_delete_volume_get])
@ -174,7 +174,7 @@ class PowerVCDriverTestCase(unittest.TestCase):
self.powervc_cinder_driver.delete_volume(volume)
def test_delete_volume_no_powervc_attribute_error(self):
#fake volume which will be passed to driver service
# fake volume which will be passed to driver service
vol_info = {'id': 1234, 'size': 1}
volume = Volume(vol_info)
self.assertRaises(AttributeError,

View File

@ -46,8 +46,6 @@ class FakeVolume():
def __dict__(self):
return None
__dict__ = fake_volume
class FakePowerVCService(PowerVCService):
def __init__(self):
@ -115,5 +113,5 @@ class Test(unittest.TestCase):
self.moxer.VerifyAll()
if __name__ == "__main__":
#import sys;sys.argv = ['', 'Test.testName']
# import sys;sys.argv = ['', 'Test.testName']
unittest.main()

View File

@ -28,7 +28,7 @@ def _build_base_http_opts(config_section, opt_map):
# init client opts for powervc and openstack only once
if OS_OPTS is None:
OS_OPTS = _build_base_http_opts('openstack', {})
#support mulitple region on local openstack
# support multiple region on local openstack
OS_OPTS['region_name'] = CONF['openstack']['region_name']
if PVC_OPTS is None:
PVC_OPTS = _build_base_http_opts('powervc', {})

View File

@ -8,7 +8,6 @@ class Client(base.ClientExtension):
def __init__(self, client):
super(Client, self).__init__(client)
###################Images functions##########################
def listImages(self):
return [image for image in self.client.images.list()]
@ -24,7 +23,6 @@ class Client(base.ClientExtension):
def updateImage(self, image_id, **kwargs):
return self.client.images.update(image_id, **kwargs)
##################Image member functions#######################
def listImageMembers(self, image_id):
return [imageMember for imageMember in
self.client.image_members.list(image_id)]
@ -39,7 +37,6 @@ class Client(base.ClientExtension):
def createImageMember(self, image_id, member_id):
return self.client.image_members.create(image_id, member_id)
##################Image tag functions (v2 only)################
def updateImageTag(self, image_id, tag_value):
if self.client_version == 2:
return self.client.image_tags.update(image_id, tag_value)

View File

@ -21,7 +21,6 @@ class Client(base.ClientExtension):
def __init__(self, client):
super(Client, self).__init__(client)
self.manager = PVCServerManager(client)
self.servers = servers
self.hypervisors = hypervisors.HypervisorManager(client)
self.images = images.ImageManager(client)
self.flavors = flavors.FlavorManager(client)
@ -157,13 +156,13 @@ class PVCServerManager(servers.ServerManager):
body["server"]['hypervisor_hostname'] = kwargs["hypervisor"]
if userdata:
# RTC/172018 -- start
# comment out the following, already done by local OS nova client
# if hasattr(userdata, 'read'):
# userdata = userdata.read()
# RTC/172018 -- start
# comment out the following, already done by local OS nova client
# if hasattr(userdata, 'read'):
# userdata = userdata.read()
# userdata = strutils.safe_encode(userdata)
# body["server"]["user_data"] = base64.b64encode(userdata)
# userdata = strutils.safe_encode(userdata)
# body["server"]["user_data"] = base64.b64encode(userdata)
body["server"]["user_data"] = userdata
# RTC/172018 -- end
if meta:
@ -198,7 +197,7 @@ class PVCServerManager(servers.ServerManager):
personality = body['server']['personality'] = []
# RTC/172018 -- start
# comment out the following, already done by local OS nova client
#for filepath, file_or_string in files.items():
# for filepath, file_or_string in files.items():
# if hasattr(file_or_string, 'read'):
# data = file_or_string.read()
# else:

View File

@ -87,7 +87,7 @@ def patch_client(service_wrapper, client):
if remove_props is not None:
cur_props = image.keys()
new_props = kwargs.keys()
#NOTE(esheffield): Only remove props that currently exist on the
# NOTE(esheffield): Only remove props that currently exist on the
# image and are NOT in the properties being updated / added
props_to_remove = set(cur_props).intersection(
set(remove_props).difference(new_props))
@ -101,7 +101,7 @@ def patch_client(service_wrapper, client):
headers=hdrs,
body=image.patch)
#NOTE(bcwaldon): calling image.patch doesn't clear the changes, so
# NOTE(bcwaldon): calling image.patch doesn't clear the changes, so
# we need to fetch the image again to get a clean history. This is
# an obvious optimization for warlock
return org_image_controller.get(image_id)

View File

@ -49,8 +49,8 @@ class AbstractService(object):
def _lookup_client(self):
return importutils.import_class("%sclient.%s.client.Client" %
(self.base_name,
self.get_client_version()))
(self.base_name,
self.get_client_version()))
def _lookup_extension(self):
try:
@ -319,7 +319,7 @@ class ClientServiceCatalog(object):
if 'status' in version_meta and \
version_meta['status'] == 'CURRENT':
ver = version_meta['id']
if not ver in services:
if ver not in services:
services[ver] = []
services[ver].append(self._filter_host(url))
return services

View File

@ -572,7 +572,7 @@ class Utils(object):
# accessible_storage_templates to return
accessible_storage_templates = []
#filter out all the accessible storage template uuid
# filter out all the accessible storage template uuid
volume_types = scg.list_all_volume_types()
volume_type_ids = []
for vol_type in volume_types:

View File

@ -80,7 +80,7 @@ class TestPVCGlanceClient(unittest.TestCase):
self.assertEqual(self.pvc_gc.
client.
images.
api.calls,
client.calls,
expect)
def test_listImageMembers(self):
@ -105,7 +105,7 @@ class TestPVCGlanceClient(unittest.TestCase):
None)]
self.moxer.VerifyAll()
self.assertEqual(self.pvc_gc.client.image_members.
api.calls,
client.calls,
expect)
def test_getImageFile(self):

View File

@ -5,6 +5,8 @@ from mock import patch
import novaclient.tests.v1_1.test_servers as servers_testbox
import novaclient.tests.v1_1.test_flavors as flavors_testbox
import novaclient.tests.v1_1.test_hypervisors as hypervisors_testbox
from novaclient.tests.fixture_data import client as fixture_client
from novaclient.tests.fixture_data import servers as fixture_servers
from novaclient.tests.v1_1 import fakes
from novaclient.v1_1 import servers
from novaclient.v1_1 import flavors
@ -37,40 +39,36 @@ from powervc.common import utils as comm_utils
"""
class PVCFakeClient(fakes.FakeClient):
class PVCClientFixture(fixture_client.V1):
"""
This PVCFakeClient class extends the current nova FakeClient,
aiming to set the self.client variable to PVCFakeHTTPClient
"""
def __init__(self, *args, **kwargs):
fakes.FakeClient.__init__(self, *args, **kwargs)
self.client = PVCFakeHTTPClient(**kwargs)
def __init__(self, requests):
super(PVCClientFixture, self).__init__(requests)
def setUp(self):
super(PVCClientFixture, self).setUp()
self.client = delegate.new_composite_deletgate(
[ext_nova.Client(self.client), self.client])
class PVCFakeHTTPClient(fakes.FakeHTTPClient):
"""
This PVCFakeHTTPClient class extends the current nova FakeHTTPClient.
For all the HTTP requests in this class, it returns a fake json data
as specified beforehand instead of requesting to a real environment.
"""
def __init__(self, **kwargs):
fakes.FakeHTTPClient.__init__(self, **kwargs)
class PVCServersFixture(fixture_servers.V1):
def get_servers(self, **kw):
"""
Override the parent method to a new powerVC specified server.
"""
return (200, {}, {"servers": [
{'id': 1234, 'name': 'sample-server'},
{'id': 5678, 'name': 'powerVC sample-server'}
]})
def setUp(self):
super(PVCServersFixture, self).setUp()
get_servers = {
"servers": [
{'id': 1234, 'name': 'sample-server'},
{'id': 5678, 'name': 'powerVC sample-server'}
]
}
def get_servers_detail(self, **kw):
"""
Override the parent method to specify powerVC specified server
detail.
"""
return (200, {}, {"servers": [
self.requests.register_uri('GET', self.url(),
json=get_servers,
headers=self.json_headers)
get_servers_detail = {"servers": [
{
"id": 1234,
"name": "sample-server",
@ -167,7 +165,30 @@ class PVCFakeHTTPClient(fakes.FakeHTTPClient):
"Server Label": "DB 1"
}
}
]})
]}
self.requests.register_uri('GET', self.url('detail'),
json=get_servers_detail,
headers=self.json_headers)
class PVCFakeClient(fakes.FakeClient):
    """
    This PVCFakeClient class extends the current nova FakeClient,
    aiming to set the self.client variable to PVCFakeHTTPClient
    so that every HTTP call made through this client is answered
    with canned PowerVC test data instead of a real service.
    """
    def __init__(self, *args, **kwargs):
        # Run the stock FakeClient initialization first, then swap the
        # HTTP transport for the PowerVC-aware fake defined below.
        fakes.FakeClient.__init__(self, *args, **kwargs)
        self.client = PVCFakeHTTPClient(**kwargs)
class PVCFakeHTTPClient(fakes.FakeHTTPClient):
"""
This PVCFakeHTTPClient class extends the current nova FakeHTTPClient.
For all the HTTP requests in this class, it returns a fake json data
as specified beforehand instead of requesting to a real environment.
"""
def __init__(self, **kwargs):
fakes.FakeHTTPClient.__init__(self, **kwargs)
def get_flavors_detail(self, **kw):
"""
@ -183,22 +204,16 @@ class PVCFakeHTTPClient(fakes.FakeHTTPClient):
'OS-FLV-EXT-DATA:ephemeral': 20,
'os-flavor-access:is_public': False,
'links': {}},
{'id': 4, 'name': '1024 MB Server', 'ram': 1024, 'disk': 10,
'OS-FLV-EXT-DATA:ephemeral': 10,
'os-flavor-access:is_public': True,
'links': {}},
{'id': 'aa1', 'name': 'PowerVC 128 MB Server', 'ram': 5120,
'disk': 5678, 'OS-FLV-EXT-DATA:ephemeral': 0,
'os-flavor-access:is_public': True,
'links': {}}
]})
def get_os_hypervisors(self, **kw):
"""
Override the parent method to specify powerVC specified hypervisors
detail.
"""
return (200, {}, {"hypervisors": [
{'id': 1234, 'hypervisor_hostname': 'hyper1'},
{'id': 5678, 'hypervisor_hostname': 'hyper2'},
]})
def get_storage_connectivity_groups_f4b541cb_f418_4b4b_83b9_a8148650d4e9(
self, **kw):
"""
@ -325,15 +340,11 @@ class PVCNovaServersTest(servers_testbox.ServersTest):
ServersTest class to provide servers related UT cases.
"""
client_fixture_class = PVCClientFixture
data_fixture_class = PVCServersFixture
def setUp(self):
super(PVCNovaServersTest, self).setUp()
nova_fakeclient = PVCFakeClient('r', 'p', 's',
'http://localhost:5000/')
# delegate to nova extension class
nova_client = delegate.new_composite_deletgate(
[ext_nova.Client(nova_fakeclient), nova_fakeclient])
self.cs = nova_client
def tearDown(self):
super(PVCNovaServersTest, self).tearDown()
@ -352,7 +363,7 @@ class PVCNovaServersTest(servers_testbox.ServersTest):
"""
sl = self.cs.manager.list_all_servers()
print sl
self.cs.assert_called('GET', '/servers/detail')
self.assert_called('GET', '/servers/detail')
[self.assertTrue(isinstance(s, servers.Server)) for s in sl]
def test_list_instance_storage_viable_hosts(self):
@ -407,15 +418,10 @@ class PVCNovaHypervisorsTest(hypervisors_testbox.HypervisorsTest):
HypervisorsTest class to provide hypervisors related UT cases.
"""
client_fixture_class = PVCClientFixture
def setUp(self):
super(PVCNovaHypervisorsTest, self).setUp()
nova_fakeclient = PVCFakeClient('r', 'p', 's',
'http://localhost:5000/')
# delegate to nova extension class
nova_client = delegate.new_composite_deletgate(
[ext_nova.Client(nova_fakeclient), nova_fakeclient])
self.cs = nova_client
def tearDown(self):
super(PVCNovaHypervisorsTest, self).tearDown()
@ -464,7 +470,7 @@ class PVCNovaHypervisorsTest(hypervisors_testbox.HypervisorsTest):
result = self.cs.hypervisors.list()
print result
self.cs.assert_called('GET', '/os-hypervisors/detail')
self.assert_called('GET', '/os-hypervisors/detail')
for idx, hyper in enumerate(result):
self.compare_to_expected(expected[idx], hyper)

View File

@ -276,7 +276,7 @@ class PVCFakeNovaHTTPClient(novafakes.FakeHTTPClient):
]
})
def get_storage_connectivity_groups_f4b541cb_f418_4b4b_83b9_a8148650d4e9(
def get_storage_connectivity_groups_f4b541cb(
self, **kw):
"""
To get a fake detail storage_connectivity_group
@ -315,9 +315,323 @@ class PVCFakeNovaHTTPClient(novafakes.FakeHTTPClient):
"created_at": "2013-08-23 14:56:11.787465",
"enabled": True,
"auto_defined": True,
"id": "f4b541cb-f418-4b4b-83b9-a8148650d4e9"
"id": "f4b541cb"
}})
def get_storage_connectivity_groups_sdfb541cb_volumes(
        self, **kw):
    """
    Return a canned (status, headers, body) HTTP response listing the
    volumes accessible through the fake storage connectivity group
    whose id is 'sdfb541cb'.

    The body contains one detached, available volume plus three in-use
    boot volumes, each attached to a different fake server.  The exact
    field values are fixture data consumed by the utils unit tests, so
    they must not be changed casually.
    """
    return (200, {}, {
        "volumes": [
            # An available volume with no attachments.
            {
                "status": "available",
                "display_name": "abcabc",
                "attachments": [],
                "availability_zone": "nova",
                "bootable": False,
                "created_at": "2013-08-30T07:22:20.729677",
                "display_description": "None",
                "volume_type": "shared_v7000_1-default",
                "snapshot_id": "None",
                "source_volid": "None",
                "metadata": {},
                "id": "ab41ee79-0f84-4f0d-976e-0aa122c8b89d",
                "size": 1
            },
            # In-use boot volume attached to server 103c1f3a-....
            {
                "status": "in-use",
                "display_name": "",
                "attachments": [
                    {
                        "host_name": "None",
                        "device": "/dev/sda",
                        "server_id":
                        "103c1f3a-c2b2-4b90-80f8-cc2dd756b636",
                        "id": "2eab9958-16e1-4559-b3e6-e723360a4f27",
                        "volume_id":
                        "2eab9958-16e1-4559-b3e6-e723360a4f27"
                    }
                ],
                "availability_zone": "nova",
                "bootable": False,
                "created_at": "2013-08-30T03:33:06.272849",
                "os-vol-tenant-attr:tenant_id":
                "2ec48b8ec30f4328bf95b8a5ad147c4b",
                "display_description": "",
                "os-vol-host-attr:host": "shared_v7000_1",
                "health_status": {
                    "health_value": "OK"
                },
                "volume_type": "None",
                "snapshot_id": "None",
                "source_volid": "5f7c7d0d-b4e1-4ebc-80d4-4f1e8734f7e5",
                "metadata": {
                    "instance_uuid":
                    "103c1f3a-c2b2-4b90-80f8-cc2dd756b636",
                    "is_boot_volume": "True"
                },
                # NOTE(review): this id is shortened ("2eab9958") while the
                # attachment above uses the full UUID — presumably
                # intentional fixture data; confirm against the tests.
                "id": "2eab9958",
                "size": 4
            },
            # In-use boot volume attached to server 6a81591c-....
            {
                "status": "in-use",
                "display_name": "",
                "attachments": [
                    {
                        "host_name": "None",
                        "device": "/dev/sda",
                        "server_id":
                        "6a81591c-1671-43d1-b8c2-e0eb09cdab84",
                        "id": "6c21891a-ce09-4701-98d7-1c8d0c6872cf",
                        "volume_id": "6c21891a-ce09-4701-98d7-1c8d0c6872cf"
                    }
                ],
                "availability_zone": "nova",
                "bootable": False,
                "created_at": "2013-08-30T03:32:30.922320",
                "os-vol-tenant-attr:tenant_id":
                "2ec48b8ec30f4328bf95b8a5ad147c4b",
                "display_description": "",
                "os-vol-host-attr:host": "shared_v7000_1",
                "health_status": {
                    "health_value": "OK"
                },
                "volume_type": "None",
                "snapshot_id": "None",
                "source_volid": "5f7c7d0d-b4e1-4ebc-80d4-4f1e8734f7e5",
                "metadata": {
                    "instance_uuid":
                    "6a81591c-1671-43d1-b8c2-e0eb09cdab84",
                    "is_boot_volume": "True"
                },
                "id": "6c21891a-ce09-4701-98d7-1c8d0c6872cf",
                "size": 4
            },
            # In-use boot volume attached to server 57625362-....
            {
                "status": "in-use",
                "display_name": "",
                "attachments": [
                    {
                        "host_name": "None",
                        "device": "/dev/sda",
                        "server_id":
                        "57625362-279c-4e02-bc9c-c6035904b2f1",
                        "id": "ff681131-9eab-4723-8261-6a80f8e3518d",
                        "volume_id": "ff681131-9eab-4723-8261-6a80f8e3518d"
                    }
                ],
                "availability_zone": "nova",
                "bootable": False,
                "created_at": "2013-08-30T03:32:03.243339",
                "os-vol-tenant-attr:tenant_id":
                "2ec48b8ec30f4328bf95b8a5ad147c4b",
                "display_description": "",
                "os-vol-host-attr:host": "shared_v7000_1",
                "health_status": {
                    "health_value": "OK"
                },
                "volume_type": "None",
                "snapshot_id": "None",
                "source_volid": "5f7c7d0d-b4e1-4ebc-80d4-4f1e8734f7e5",
                "metadata": {
                    "instance_uuid":
                    "57625362-279c-4e02-bc9c-c6035904b2f1",
                    "is_boot_volume": "True"
                },
                "id": "ff681131-9eab-4723-8261-6a80f8e3518d",
                "size": 4
            }
        ]
    })
def get_storage_connectivity_groups_sdfb541cb_volume_types(
        self, **kw):
    """
    Return a canned (status, headers, body) HTTP response listing the
    volume types for the fake storage connectivity group 'sdfb541cb':
    one backend-bound type plus two encryption types.
    """
    shared_v7000_type = {
        "extra_specs": {
            "drivers:storage_pool": "P-NGP01-pool",
            "capabilities:volume_backend_name": "shared_v7000_1",
            "drivers:rsize": "-1"
        },
        "name": "shared_v7000_1-default",
        "id": "6627888e-9f59-4996-8c22-5d528c3273f0"
    }
    encryption_types = [
        {"extra_specs": {},
         "name": "dm-crypt",
         "id": "a3ae95f6-4aab-4446-b1d2-0fc2f60a89bb"},
        {"extra_specs": {},
         "name": "LUKS",
         "id": "291f81a2-591b-4164-b2b2-829abc935573"},
    ]
    body = {"volume-types": [shared_v7000_type] + encryption_types}
    return (200, {}, body)
def get_storage_connectivity_groups_f4b541cb_volumes(
        self, **kw):
    """
    Return a canned (status, headers, body) HTTP response listing the
    volumes accessible through the fake storage connectivity group
    whose id is 'f4b541cb'.

    The payload mirrors the 'sdfb541cb' variant: one detached,
    available volume plus three in-use boot volumes.  The exact field
    values are fixture data consumed by the utils unit tests, so they
    must not be changed casually.
    """
    return (200, {}, {
        "volumes": [
            # An available volume with no attachments.
            {
                "status": "available",
                "display_name": "abcabc",
                "attachments": [],
                "availability_zone": "nova",
                "bootable": False,
                "created_at": "2013-08-30T07:22:20.729677",
                "display_description": "None",
                "volume_type": "shared_v7000_1-default",
                "snapshot_id": "None",
                "source_volid": "None",
                "metadata": {},
                "id": "ab41ee79-0f84-4f0d-976e-0aa122c8b89d",
                "size": 1
            },
            # In-use boot volume attached to server 103c1f3a-....
            {
                "status": "in-use",
                "display_name": "",
                "attachments": [
                    {
                        "host_name": "None",
                        "device": "/dev/sda",
                        "server_id":
                        "103c1f3a-c2b2-4b90-80f8-cc2dd756b636",
                        "id": "2eab9958-16e1-4559-b3e6-e723360a4f27",
                        "volume_id":
                        "2eab9958-16e1-4559-b3e6-e723360a4f27"
                    }
                ],
                "availability_zone": "nova",
                "bootable": False,
                "created_at": "2013-08-30T03:33:06.272849",
                "os-vol-tenant-attr:tenant_id":
                "2ec48b8ec30f4328bf95b8a5ad147c4b",
                "display_description": "",
                "os-vol-host-attr:host": "shared_v7000_1",
                "health_status": {
                    "health_value": "OK"
                },
                "volume_type": "None",
                "snapshot_id": "None",
                "source_volid": "5f7c7d0d-b4e1-4ebc-80d4-4f1e8734f7e5",
                "metadata": {
                    "instance_uuid":
                    "103c1f3a-c2b2-4b90-80f8-cc2dd756b636",
                    "is_boot_volume": "True"
                },
                # NOTE(review): shortened id ("2eab9958") vs the full UUID
                # used in the attachment above — presumably intentional
                # fixture data; confirm against the tests.
                "id": "2eab9958",
                "size": 4
            },
            # In-use boot volume attached to server 6a81591c-....
            {
                "status": "in-use",
                "display_name": "",
                "attachments": [
                    {
                        "host_name": "None",
                        "device": "/dev/sda",
                        "server_id":
                        "6a81591c-1671-43d1-b8c2-e0eb09cdab84",
                        "id": "6c21891a-ce09-4701-98d7-1c8d0c6872cf",
                        "volume_id": "6c21891a-ce09-4701-98d7-1c8d0c6872cf"
                    }
                ],
                "availability_zone": "nova",
                "bootable": False,
                "created_at": "2013-08-30T03:32:30.922320",
                "os-vol-tenant-attr:tenant_id":
                "2ec48b8ec30f4328bf95b8a5ad147c4b",
                "display_description": "",
                "os-vol-host-attr:host": "shared_v7000_1",
                "health_status": {
                    "health_value": "OK"
                },
                "volume_type": "None",
                "snapshot_id": "None",
                "source_volid": "5f7c7d0d-b4e1-4ebc-80d4-4f1e8734f7e5",
                "metadata": {
                    "instance_uuid":
                    "6a81591c-1671-43d1-b8c2-e0eb09cdab84",
                    "is_boot_volume": "True"
                },
                "id": "6c21891a-ce09-4701-98d7-1c8d0c6872cf",
                "size": 4
            },
            # In-use boot volume attached to server 57625362-....
            {
                "status": "in-use",
                "display_name": "",
                "attachments": [
                    {
                        "host_name": "None",
                        "device": "/dev/sda",
                        "server_id":
                        "57625362-279c-4e02-bc9c-c6035904b2f1",
                        "id": "ff681131-9eab-4723-8261-6a80f8e3518d",
                        "volume_id": "ff681131-9eab-4723-8261-6a80f8e3518d"
                    }
                ],
                "availability_zone": "nova",
                "bootable": False,
                "created_at": "2013-08-30T03:32:03.243339",
                "os-vol-tenant-attr:tenant_id":
                "2ec48b8ec30f4328bf95b8a5ad147c4b",
                "display_description": "",
                "os-vol-host-attr:host": "shared_v7000_1",
                "health_status": {
                    "health_value": "OK"
                },
                "volume_type": "None",
                "snapshot_id": "None",
                "source_volid": "5f7c7d0d-b4e1-4ebc-80d4-4f1e8734f7e5",
                "metadata": {
                    "instance_uuid":
                    "57625362-279c-4e02-bc9c-c6035904b2f1",
                    "is_boot_volume": "True"
                },
                "id": "ff681131-9eab-4723-8261-6a80f8e3518d",
                "size": 4
            }
        ]
    })
def get_storage_connectivity_groups_f4b541cb_volume_types(
        self, **kw):
    """
    Return a canned (status, headers, body) HTTP response listing the
    volume types for the fake storage connectivity group 'f4b541cb':
    one backend-bound type plus two encryption types.
    """
    backend_type = {
        "extra_specs": {
            "drivers:storage_pool": "P-NGP01-pool",
            "capabilities:volume_backend_name": "shared_v7000_1",
            "drivers:rsize": "-1"
        },
        "name": "shared_v7000_1-default",
        "id": "6627888e-9f59-4996-8c22-5d528c3273f0"
    }
    crypto_types = [
        {"extra_specs": {},
         "name": "dm-crypt",
         "id": "a3ae95f6-4aab-4446-b1d2-0fc2f60a89bb"},
        {"extra_specs": {},
         "name": "LUKS",
         "id": "291f81a2-591b-4164-b2b2-829abc935573"},
    ]
    payload = {"volume-types": [backend_type] + crypto_types}
    return (200, {}, payload)
def get_storage_connectivity_groups(self, **kw):
"""
To return a fake storage_connectivity_groups
@ -325,11 +639,11 @@ class PVCFakeNovaHTTPClient(novafakes.FakeHTTPClient):
return (200, {}, {"storage_connectivity_groups": [
{
"display_name": "Auto-SCG for Registered SAN",
"id": "f4b541cb-f418-4b4b-83b9-a8148650d4e9"
"id": "f4b541cb"
},
{
"display_name": "SCG sample",
"id": "sdfb541cb-f418-4b4b-3129-a814865023fs"
"id": "sdfb541cb"
}
]})
@ -367,7 +681,7 @@ class PVCFakeNovaHTTPClient(novafakes.FakeHTTPClient):
"created_at": "2013-08-23 14:56:11.787465",
"enabled": True,
"auto_defined": True,
"id": "f4b541cb-f418-4b4b-83b9-a8148650d4e9"
"id": "f4b541cb"
},
{
"auto_add_vios": True,
@ -398,7 +712,7 @@ class PVCFakeNovaHTTPClient(novafakes.FakeHTTPClient):
"created_at": "2013-08-23 14:56:11.787465",
"enabled": True,
"auto_defined": True,
"id": "sdfb541cb-f418-4b4b-3129-a814865023fs"
"id": "sdfb541cb"
}
]})
@ -730,8 +1044,8 @@ class UtilsFakeTest(utils.TestCase):
self.utils._cinderclient = cinder_client
self.utils.scg_cache = SCGCache(nova_client)
self.scg_id_list = ['sdfb541cb-f418-4b4b-3129-a814865023fs',
'f4b541cb-f418-4b4b-83b9-a8148650d4e9']
self.scg_id_list = ['sdfb541cb',
'f4b541cb']
self.scg_name_list = ['Auto-SCG for Registered SAN',
'SCG Sample']
@ -756,7 +1070,7 @@ class UtilsFakeTest(utils.TestCase):
def test_get_scg_accessible_storage_providers_1(self):
accessible_storage_providers = \
self.utils.get_scg_accessible_storage_providers(
"f4b541cb_f418_4b4b_83b9_a8148650d4e9")
"f4b541cb")
self.assertEqual(accessible_storage_providers[0].storage_hostname,
"shared_v7000_1")
@ -792,7 +1106,7 @@ class UtilsFakeTest(utils.TestCase):
def test_get_scg_accessible_storage_templates_1(self):
accessible_storage_templates = \
self.utils.get_scg_accessible_storage_templates(
"f4b541cb_f418_4b4b_83b9_a8148650d4e9")
"f4b541cb")
# Shoud return the storage template which in the accessible
# storage providers
self.assertEqual(accessible_storage_templates[0].name,
@ -811,7 +1125,7 @@ class UtilsFakeTest(utils.TestCase):
def test_get_scg_accessible_volumes_1(self):
scg_accessible_volumes = \
self.utils.get_scg_accessible_volumes(
"f4b541cb_f418_4b4b_83b9_a8148650d4e9")
"f4b541cb")
# Shoud return the volume which in the accessible
# storage templates
self.assertEqual(scg_accessible_volumes[0].id,
@ -864,7 +1178,7 @@ class UtilsFakeTest(utils.TestCase):
def test_get_scg_id_by_scgName_1(self):
scg_id = self.utils.\
get_scg_id_by_scgName("Auto-SCG for Registered SAN")
self.assertEqual(scg_id, "f4b541cb-f418-4b4b-83b9-a8148650d4e9")
self.assertEqual(scg_id, "f4b541cb")
def test_get_scg_id_by_scgName_2(self):
scg_id = self.utils.\

View File

@ -54,7 +54,9 @@ CONF.register_opts(agent_opts, "AGENT")
class PowerVCNeutronAgent(object):
"""This is the main PowerVC Neutron agent class"""
"""
This is the main PowerVC Neutron agent class
"""
def __init__(self):
self.end_thread = False
@ -109,9 +111,6 @@ class PowerVCNeutronAgent(object):
self)
self._setup_rpc()
#==============================================================================
# Generate DB stats string
#==============================================================================
def _generate_db_stats(self):
net_creating, net_active, net_deleting = self.db.get_network_stats()
sub_creating, sub_active, sub_deleting = self.db.get_subnet_stats()
@ -127,17 +126,13 @@ class PowerVCNeutronAgent(object):
port_deleting)
return '(n:{0}, s:{1}, p:{2})'.format(stat_n, stat_s, stat_p)
#==============================================================================
# Handle network create
#==============================================================================
def _handle_local_network_create(self, network):
net_id = network.get('id')
db_net = self.db.get_network(local_id=net_id)
if db_net:
LOG.info(_("DB entry for local network %s already exists"), net_id)
return
#verify that if local network has no subnet, not handle it.
# verify that if local network has no subnet, not handle it.
if not utils.network_has_subnet(network):
# No subnet, but maybe one was created when this event was queued
# up waiting to be processed. Refresh with current network
@ -204,10 +199,6 @@ class PowerVCNeutronAgent(object):
LOG.info(_("PowerVC network is not allowed: %s"),
network.get('name'))
#==============================================================================
# Handle network update
#==============================================================================
def _handle_local_network_update(self, network):
net_id = network.get('id')
db_net = self.db.get_network(local_id=net_id)
@ -253,10 +244,6 @@ class PowerVCNeutronAgent(object):
else:
LOG.info(_("Network changes do not need to be updated"))
#==============================================================================
# Handle network delete
#==============================================================================
def _handle_local_network_delete(self, net_id):
db_net = self.db.get_network(local_id=net_id)
if not db_net:
@ -296,10 +283,6 @@ class PowerVCNeutronAgent(object):
return
self.db.delete_network(db_net)
#==============================================================================
# Handle subnet create
#==============================================================================
def _handle_local_subnet_create(self, subnet):
local_id = subnet.get('id')
db_sub = self.db.get_subnet(local_id=local_id)
@ -371,10 +354,6 @@ class PowerVCNeutronAgent(object):
if new_sub:
self.db.set_subnet_local_id(db_sub, new_sub.get('id'))
#==============================================================================
# Handle subnet update
#==============================================================================
def _handle_local_subnet_update(self, subnet):
local_id = subnet.get('id')
db_sub = self.db.get_subnet(local_id=local_id)
@ -421,10 +400,6 @@ class PowerVCNeutronAgent(object):
else:
LOG.info(_("Subnet changes do not need to be updated"))
#==============================================================================
# Handle subnet delete
#==============================================================================
def _handle_local_subnet_delete(self, sub_id):
db_sub = self.db.get_subnet(local_id=sub_id)
if not db_sub:
@ -510,10 +485,6 @@ class PowerVCNeutronAgent(object):
# No port left
return False
#==============================================================================
# Handle port create
#==============================================================================
def _handle_local_port_create(self, port):
local_id = port.get('id')
db_port = self.db.get_port(local_id=local_id)
@ -592,10 +563,6 @@ class PowerVCNeutronAgent(object):
if new_port:
self.db.set_port_local_id(db_port, new_port.get('id'))
#==============================================================================
# Handle port update
#==============================================================================
def _handle_local_port_update(self, port):
local_id = port.get('id')
db_port = self.db.get_port(local_id=local_id)
@ -640,10 +607,6 @@ class PowerVCNeutronAgent(object):
else:
LOG.info(_("Port changes do not need to be updated"))
#==============================================================================
# Handle port delete
#==============================================================================
def _handle_local_port_delete(self, port_id):
db_port = self.db.get_port(local_id=port_id)
if not db_port:
@ -737,20 +700,12 @@ class PowerVCNeutronAgent(object):
return
self.db.delete_port(db_port)
#==============================================================================
# Register handlers routines
#==============================================================================
def _register_handler(self, event_os, event_type, handler):
key = event_type
if event_os:
key = event_os + ':' + event_type
self.handlers[key] = handler
#==============================================================================
# Handle event
#==============================================================================
def _handle_event(self, event):
event_os = event.get(constants.EVENT_OS)
event_type = event.get(constants.EVENT_TYPE)
@ -769,10 +724,6 @@ class PowerVCNeutronAgent(object):
return
return handler(event_obj)
#==============================================================================
# Queue event for procesing by the daemon loop
#==============================================================================
def queue_event(self, event_os, event_type, event_obj):
event = {}
event[constants.EVENT_OS] = event_os
@ -780,10 +731,6 @@ class PowerVCNeutronAgent(object):
event[constants.EVENT_OBJECT] = event_obj
self.event_q.put(event)
#==============================================================================
# Setup RPC routine
#==============================================================================
def _setup_rpc(self):
"""
set up RPC support
@ -796,12 +743,10 @@ class PowerVCNeutronAgent(object):
self.conn.consume_in_threads()
LOG.info(_("RPC listener created"))
#==============================================================================
# Synchronize all Neutron objects
#==============================================================================
def _synchronize(self, default_target=LOCAL_OS):
"""Main synchronize routine"""
"""
Main synchronize routine
"""
start = time.time()
LOG.info(_("Synchronizing all networks/subnets/ports..."))
self._synchronize_networks(default_target)
@ -813,10 +758,6 @@ class PowerVCNeutronAgent(object):
LOG.info(_("Full sync elapsed time: %s %s"), elapsed, db_stats)
self.retry_sync = time.time() + self.polling_interval
#==============================================================================
# Synchronize networks
#==============================================================================
def _synchronize_networks(self, target=LOCAL_OS):
pvc_nets = self.pvc.get_networks()
local_nets = self.local.get_networks()
@ -869,12 +810,12 @@ class PowerVCNeutronAgent(object):
if db_net:
# DB entry for this local network already exists
continue
#if local network has no subnet, not handle it.
# if local network has no subnet, not handle it.
if not utils.network_has_subnet(local_net):
LOG.info(_("Local network %s has no subnet"),
local_net.get('name'))
continue
#if local network has subnet, verify if the subnet is mappable
# if local network has subnet, verify if the subnet is mappable
if not utils.network_has_mappable_subnet(self.local,
local_net):
LOG.info(_("Local network %s has no mappable subnet"),
@ -951,10 +892,6 @@ class PowerVCNeutronAgent(object):
self.db.set_network_pvc_id(db_net, pvc_id)
continue
#==============================================================================
# Synchronize subnets
#==============================================================================
def _synchronize_subnets(self, target=LOCAL_OS):
pvc_subnets = self.pvc.get_subnets()
local_subnets = self.local.get_subnets()
@ -1090,10 +1027,6 @@ class PowerVCNeutronAgent(object):
self.db.set_subnet_pvc_id(db_sub, pvc_id)
continue
#==============================================================================
# Synchronize ports
#==============================================================================
def _synchronize_ports(self, target=LOCAL_OS):
pvc_ports = self.pvc.get_ports()
local_ports = self.local.get_ports()
@ -1281,17 +1214,13 @@ class PowerVCNeutronAgent(object):
self.db.set_port_pvc_id(db_port, pvc_id)
continue
#==============================================================================
# RPC methods
#==============================================================================
def set_device_id_on_port_by_pvc_instance_uuid(self,
db_api,
device_id,
pvc_ins_uuid):
"""
Query the ports by pvc instance uuid, and set its
local instance id(device_id).
Query the ports by pvc instance uuid, and set its
local instance id(device_id).
"""
local_ids = []
pvc_ports = self.pvc.get_ports_by_instance_uuid(pvc_ins_uuid)
@ -1317,12 +1246,10 @@ class PowerVCNeutronAgent(object):
LOG.debug(_("Set device_id for %s with %s"), pvc_id, device_id)
return local_ids
#==============================================================================
# Main loop of the agent
#==============================================================================
def _process_event_queue(self):
"""Main loop for the agent"""
"""
Main loop for the agent
"""
while not self.end_thread:
try:
# Perform a full synchronization of all neutron objects
@ -1363,10 +1290,6 @@ class PowerVCNeutronAgent(object):
# handling an event
pass
#==============================================================================
# Main loop of the agent
#==============================================================================
def daemon_loop(self):
# Start a thread here to process the event queue. If the event queue
# is called from the main thread, incoming RPC requests are delayed
@ -1391,10 +1314,6 @@ class PowerVCNeutronAgent(object):
LOG.info(_("Worker thread is dead. Exiting"))
#==============================================================================
# Main routine
#==============================================================================
def main():
try:
LOG.info(_("-" * 80))

View File

@ -13,10 +13,7 @@ LOG = logging.getLogger(__name__)
LIST_COLUMNS = ['status', 'local_id', 'pvc_id', 'sync_key']
#==============================================================================
# RPC client
#==============================================================================
class RpcClient(RpcProxy):
BASE_RPC_API_VERSION = '1.0'

View File

@ -11,10 +11,7 @@ from powervc.neutron.db import powervc_db_v2
LOG = logging.getLogger(__name__)
#==============================================================================
# RPC callback
#==============================================================================
class PVCRpcCallbacks(object):
"""
RPC callbacks for nova driver calling this agent.

View File

@ -143,10 +143,6 @@ class Client(neutron_client_bindings.Client):
port_id = payload.get('port_id')
self.agent.queue_event(self.os, event, port_id)
#==============================================================================
# Local OS - Utility routines using other clients (Nova, Glance)
#==============================================================================
def get_power_vm_mapping(self):
"""
Return dict with PowerVC to local instance uuid mappings

View File

@ -6,16 +6,11 @@ Created on Aug 2, 2013
@author: John Kasperski
'''
#==============================================================================
# Device owner value for Neutron ports we create
#==============================================================================
POWERVC_DEVICE_OWNER = 'network:IBM SmartCloud'
RSVD_PORT_PREFIX = 'pvc:'
#==============================================================================
# Mapping enum values
#==============================================================================
OBJ_TYPE_NETWORK = 'Network'
OBJ_TYPE_SUBNET = 'Subnet'
OBJ_TYPE_PORT = 'Port'
@ -26,10 +21,7 @@ STATUS_DELETING = 'Deleting'
MAX_UPDATE_DATA_LENGTH = 512
#==============================================================================
# Neutron network fields (that we care about)
#==============================================================================
NETWORK_CREATE_FIELDS = ['name',
'shared',
'provider:network_type',
@ -38,10 +30,7 @@ NETWORK_CREATE_FIELDS = ['name',
NETWORK_UPDATE_FIELDS = ['name',
'shared']
#==============================================================================
# Neutron subnet fields (that we care about)
#==============================================================================
SUBNET_CREATE_FIELDS = ['name',
'ip_version',
'cidr',
@ -54,19 +43,13 @@ SUBNET_UPDATE_FIELDS = ['name',
'dns_nameservers',
'enable_dhcp']
#==============================================================================
# Neutron port fields (that we care about)
#==============================================================================
PORT_CREATE_FIELDS = ['name',
'mac_address',
'device_owner']
PORT_UPDATE_FIELDS = ['name']
#==============================================================================
# Qpid message handling
#==============================================================================
QPID_EXCHANGE = 'neutron'
QPID_TOPIC = 'notifications.info'

View File

@ -19,10 +19,6 @@ LOG = logging.getLogger(__name__)
CONF = cfg.CONF
#==============================================================================
# Utility routines
#==============================================================================
def _compare_objects(local_obj, pvc_obj, db_obj,
update_fields, default_target):
for field in update_fields:
@ -280,4 +276,3 @@ def translate_port_id(db, port_id, target_os):
if db_port:
return db_port.get('pvc_id')
return None

View File

@ -16,10 +16,6 @@ LOG = logging.getLogger(__name__)
class PowerVCAgentDB(object):
"""PowerVC Agent DB access methods"""
#==============================================================================
# Internal "object" methods
#==============================================================================
def __init__(self):
self.session = db_api.get_session()
db_api.configure_db()
@ -235,10 +231,6 @@ class PowerVCAgentDB(object):
LOG.warning(_("Object not found"))
return None
#==============================================================================
# Network methods
#==============================================================================
def create_network(self, net, sync_key, local_id=None, pvc_id=None):
return self._create_object(constants.OBJ_TYPE_NETWORK, sync_key,
utils.gen_network_update_data(net),
@ -268,10 +260,6 @@ class PowerVCAgentDB(object):
def set_network_update_data(self, obj, update_data):
return self._set_object_update_data(obj, update_data)
#==============================================================================
# Subnet methods
#==============================================================================
def create_subnet(self, sub, sync_key, local_id=None, pvc_id=None):
return self._create_object(constants.OBJ_TYPE_SUBNET, sync_key,
utils.gen_subnet_update_data(sub),
@ -301,10 +289,6 @@ class PowerVCAgentDB(object):
def set_subnet_update_data(self, obj, update_data):
return self._set_object_update_data(obj, update_data)
#==============================================================================
# Port methods
#==============================================================================
def create_port(self, port, sync_key, local_id=None, pvc_id=None):
return self._create_object(constants.OBJ_TYPE_PORT, sync_key,
utils.gen_port_update_data(port),

View File

@ -112,8 +112,8 @@ class ComputeServiceManager(object):
for remote_service in remote_services]
# First we kill the services for services no longer running
for local_service_name in self.services.keys():
#calls to greenthread.sleep have been added to all of the
#loops in this class because it has been long running
# calls to greenthread.sleep have been added to all of the
# loops in this class because it has been long running
eventlet.greenthread.sleep(0)
if local_service_name in remote_hostnames:
LOG.debug("Service %s still running compute-service "

View File

@ -9,7 +9,7 @@ PVC_ID = "pvc_id" # pvc instance uuid
PPC64 = "ppc64" # Found on the wiki
#hypervisor type
# hypervisor type
PVM_HYPERVISOR_TYPE = "powervm"
# Flavor constants
@ -38,7 +38,7 @@ EVENT_INSTANCE_VOLUME_ATTACH = 'compute.instance.volume.attach'
EVENT_INSTANCE_VOLUME_DETACH = 'compute.instance.volume.detach'
EVENT_INSTANCE_IMPORT = 'compute.instance.import.end'
#Volume id to to be updated by periodic sync
# Volume id to to be updated by periodic sync
INVALID_VOLUME_ID = '00000000-0000-0000-0000-000000000000'
LOCAL_PVC_PREFIX = 'powervm:'

View File

@ -292,10 +292,10 @@ class PowerVCCloudManager(manager.Manager):
base_options, unused_image, unused_flavor = \
self._translate_pvc_instance(context, pvc_instance, local_instance)
#In order to support the rename function in the Hosting OS, we will
#avoid the name of the instance is updated.
#In this situation, the name of the same instance will be different in
#the hosting OS and PowerVC.
# In order to support the rename function in the Hosting OS, we will
# avoid the name of the instance is updated.
# In this situation, the name of the same instance will be different in
# the hosting OS and PowerVC.
base_options['display_name'] = local_instance.get('display_name')
self.compute_api.update(context, local_instance, **base_options)
@ -516,15 +516,15 @@ class PowerVCCloudManager(manager.Manager):
def sync_volume_attachment(self, ctx, pvc_instance_id, local_instance):
"""Sync volume attachment information in BDM"""
#Since PowerVC server resp does not contain this info, it is needed now
#to retrieve it through sending another rest api to list
#volume attachments.
# Since PowerVC server resp does not contain this info, it is needed
# now to retrieve it through sending another rest api to list
# volume attachments.
attachments = self.driver.list_os_attachments(pvc_instance_id)
attached_volume_ids = []
attached_devices = []
for attachment in attachments:
#Each instance has a default volume,
#which is not what we want to show
# Each instance has a default volume,
# which is not what we want to show
if attachment.device != '/dev/sda':
block_device_map = {}
vol_id = self.cache_volume.get_by_id(attachment.id)
@ -547,7 +547,7 @@ class PowerVCCloudManager(manager.Manager):
block_device_map['destination_type'] = 'volume'
db_api.block_device_mapping_update_or_create(ctx,
block_device_map)
#Removing the BDMs are not in powervc
# Removing the BDMs are not in powervc
leftover_bdms = []
primitive_instance = obj_base.obj_to_primitive(local_instance)
local_attachments = self.conductor_api.\
@ -557,14 +557,14 @@ class PowerVCCloudManager(manager.Manager):
continue
local_volume_id = local_attachment['volume_id']
if local_volume_id in attached_volume_ids:
#this volume is still attached
# this volume is still attached
continue
if local_volume_id == constants.INVALID_VOLUME_ID:
#for invalid volume id, just check the device_name
# for invalid volume id, just check the device_name
local_device_name = local_attachment['device_name']
if local_device_name in attached_devices:
#this volume is still attached even it's
#volume id is not valid
# this volume is still attached even it's
# volume id is not valid
LOG.info(_("retain the volume with device name: %s, "
"although it's volume id is not valid "
"yet" % local_device_name))
@ -627,7 +627,7 @@ class PowerVCCloudManager(manager.Manager):
LOG.warning(_("Removing PowerVC instance %s in nova failed."),
local_instance.get('name'))
#delete network resource
# delete network resource
self.network_api.deallocate_for_instance(ctx, local_instance)
# Send notification about instance deletion due to sync operation
@ -653,7 +653,7 @@ class PowerVCCloudManager(manager.Manager):
# Get the uuid of pvc from the local instance.
metadata = self.compute_api.get_instance_metadata(ctx, local_instance)
if not constants.PVC_ID in metadata:
if constants.PVC_ID not in metadata:
return False
local_uuid = metadata[constants.PVC_ID]
@ -804,7 +804,7 @@ class PowerVCCloudManager(manager.Manager):
rtn = self._get_pvc_flavor(ctx, pvc_flavor_id, local_flavorid)
if rtn is None:
#Get the default flavor
# Get the default flavor
rtn = flavors.get_default_flavor()
return rtn
@ -888,10 +888,10 @@ class PowerVCCloudManager(manager.Manager):
if rtn is None:
for key in rtns.keys():
if memory <= rtns[key].get('memory_mb')\
and vcpus <= rtns[key].get('vcpus')\
and root_gb <= rtns[key].\
and vcpus <= rtns[key].get('vcpus')\
and root_gb <= rtns[key].\
get('root_gb')\
and ephemeral_gb <= rtns[key].\
and ephemeral_gb <= rtns[key].\
get('ephemeral_gb'):
rtn = rtns[key]
LOG.info(_("Return the"
@ -903,9 +903,9 @@ class PowerVCCloudManager(manager.Manager):
break
except Exception:
if rtn is None:
#Get the default flavor when can not get the
#corresponding flavor with the specified
#PowerVC instance
# Get the default flavor when can not get the
# corresponding flavor with the specified
# PowerVC instance
LOG.info("Get the default flavor")
rtn = flavors.get_default_flavor()
@ -1173,7 +1173,7 @@ class PowerVCCloudManager(manager.Manager):
return
vol_id = self.cache_volume.get_by_id(powervc_volume_id)
if vol_id is None:
#get the local volume info and cache it
# get the local volume info and cache it
LOG.debug(_("Get the local volume info for powervc volume with id:"
" %s") % powervc_volume_id)
local_volume_id = self.driver.\
@ -1181,7 +1181,7 @@ class PowerVCCloudManager(manager.Manager):
LOG.debug(_("Finished to get the local volume info for powervc "
"volume with id: %s") % powervc_volume_id)
if local_volume_id is None:
#continue to process, just log warning
# continue to process, just log warning
LOG.warning(_('volume does not exist locally for remote '
'volume: %s') % powervc_volume_id)
else:
@ -1283,10 +1283,10 @@ class PowerVCCloudManager(manager.Manager):
self._translate_pvc_instance(context, powervc_instance,
local_instance)
#In order to support the rename function in the Hosting OS, we will
#avoid the name of the instance is updated.
#In this situation, the name of the same instance will be different in
#the hosting OS and PowerVC.
# In order to support the rename function in the Hosting OS, we will
# avoid the name of the instance is updated.
# In this situation, the name of the same instance will be different in
# the hosting OS and PowerVC.
updated_instance['display_name'] = local_instance.get('display_name')
# Apply the VM and task state to the updated instance properties based
@ -1422,7 +1422,7 @@ class PowerVCCloudManager(manager.Manager):
# We only update the VM state for the following event types
vm_state_events = [constants.EVENT_INSTANCE_POWER_ON,
constants.EVENT_INSTANCE_POWER_OFF]
if not event_type in vm_state_events:
if event_type not in vm_state_events:
del updated_instance['vm_state']
return updated_instance

View File

@ -538,7 +538,7 @@ class PowerVCService(object):
# Check whether we can get the metadata from instance
key = 'metadata'
pvc_id = 0
if not key in instance:
if key not in instance:
LOG.info(_('Could not find the metadata from the instance.'))
server.id = pvc_id
return server
@ -559,7 +559,7 @@ class PowerVCService(object):
server.id = self._get_pvcid_from_metadata(instance)
return server
if metadatas == [] or not key in metadatas.keys():
if metadatas == [] or key not in metadatas.keys():
LOG.info(_('Could not find the pvc_id from the metadata.'))
server.id = pvc_id
return server
@ -651,7 +651,7 @@ class PowerVCService(object):
# extract activation data from instance
meta = instance._metadata
key_name = instance.key_name
#key_data = instance.key_data
# key_data = instance.key_data
config_drive = instance._config_drive
userdata = instance.user_data # already base64 encoded by local OS
@ -1091,9 +1091,9 @@ class PowerVCService(object):
image_name = image["name"]
glance_image_service = glance.get_default_image_service()
#nova is going to pick up the uuid from the image the instance was
#deployed from. We need to remove it to prevent treating this image
#as if it is the base deploy image
# nova is going to pick up the uuid from the image the instance was
# deployed from. We need to remove it to prevent treating this image
# as if it is the base deploy image
image_props = image["properties"]
if common_constants.POWERVC_UUID_KEY in image_props:
props = {'properties': {common_constants.POWERVC_UUID_KEY: None}}
@ -1158,7 +1158,7 @@ class PowerVCService(object):
pvc_server_dict = pvc_server.__dict__
current_host = pvc_server_dict['OS-EXT-SRV-ATTR:host']
LOG.debug(_('Original Host %s, Current Host %s') %
(orig_host, current_host))
(orig_host, current_host))
if (pvc_server.status != pvc_vm_states.MIGRATING and
current_host != orig_host):
LOG.info(_("Instance %s completed migration.") % pvc_server.id)

View File

@ -37,8 +37,8 @@ class ExtendedPowerVMAttributesController(wsgi.Controller):
if att in metadata:
health_status['health_value'] = metadata[att]
del metadata[att]
#TODO:Here can add other health_status property to construct
#dictionary data
# TODO:Here can add other health_status property to construct
# dictionary data
server[key] = health_status
for item in pvc_attrs:

View File

@ -29,8 +29,8 @@ class FakeOSFlavor():
self.os_flavor['name'] = "m1.small"
self.os_flavor['memory_mb'] = 2048
self.os_flavor['vcpus'] = 1
#FixMe Don't know what are proper values for the property "root_gb",
#"ephemeral_gb", "flavorid"
# FixMe Don't know what are proper values for the property "root_gb",
# "ephemeral_gb", "flavorid"
self.os_flavor['root_gb'] = 0
self.os_flavor['ephemeral_gb'] = 0
self.os_flavor['flavorid'] = "fakeflavorid"

View File

@ -204,7 +204,7 @@ class PowerVCDriverTestCase(test.NoDBTestCase):
service.get_instance = MagicMock(return_value=None)
cclmd = self._driver.check_can_live_migrate_destination
dest_compute_info = FakeHostStat().stat
self.assertRaises(mpcError, cclmd, None,
self.assertRaises(Invalid, cclmd, None,
os_instance, None, dest_compute_info)
def test_check_can_live_migrate_destination_invalid_state(self):
@ -215,7 +215,7 @@ class PowerVCDriverTestCase(test.NoDBTestCase):
cclmd = self._driver.check_can_live_migrate_destination
dest_compute_info = FakeHostStat().stat
service._is_live_migration_valid = MagicMock(return_value=False)
self.assertRaises(mpcError, cclmd, None,
self.assertRaises(Invalid, cclmd, None,
os_instance, None, dest_compute_info)
def test_check_can_live_migrate_destination_block_migration(self):
@ -250,7 +250,7 @@ class PowerVCDriverTestCase(test.NoDBTestCase):
os_instance.os_instance['metadata']['powervm:defer_placement'] = \
'false'
self.assertFalse(driver._check_defer_placement(os_instance))
#if the property is not presented
# if the property is not presented
del os_instance.os_instance['metadata']['powervm:defer_placement']
self.assertFalse(driver._check_defer_placement(os_instance))
@ -353,7 +353,6 @@ class PowerVCDriverTestCase(test.NoDBTestCase):
pvc_driver._service.longrun_loop_interval = 0
pvc_driver._service.longrun_initial_delay = 0
pvc_driver._service.max_tries = 2
#pvc_driver._service.
connection_info = {"serial": 1}
metadata = {"pvc_id": 1}
instance = {"metadata": metadata}
@ -389,9 +388,6 @@ class PowerVCDriverTestCase(test.NoDBTestCase):
pvc_driver.confirm_migration.assert_called_once_with(None,
instance, None)
pvc_driver._service.update_correct_host(context, instance)
pvc_driver.power_on.assert_called_once_with(context, instance,
network_info,
block_device_info)
def test_snapshot(self):
pvc_driver = self._driver
@ -437,7 +433,7 @@ class TestDriver(unittest.TestCase):
admin_password = None
PowerVCDriver._check_defer_placement = \
mock.MagicMock(return_value=False)
#mock database operation
# mock database operation
db.flavor_get = mock.MagicMock()
PowerVCDriver._get_pvc_network_info = mock.MagicMock()
self.powervc_driver._service.validate_update_scg = mock.MagicMock()
@ -475,7 +471,7 @@ class TestDriver(unittest.TestCase):
admin_password = None
PowerVCDriver._check_defer_placement = \
mock.MagicMock(return_value=False)
#mock database operation
# mock database operation
db.flavor_get = mock.MagicMock()
PowerVCDriver._get_pvc_network_info = mock.MagicMock()
self.powervc_driver._service.validate_update_scg = \
@ -581,7 +577,7 @@ class TestDriver(unittest.TestCase):
return image_meta
def fake_instance(self):
instance = dict()
instance = MagicMock()
instance['instance_type_id'] = 'fake_instace_type_id'
instance['host'] = 'fake_host'
instance['uuid'] = 'fake_uuid'
@ -633,8 +629,8 @@ class TestGetInstance(testtools.TestCase):
def test_get_instance_not_found(self):
"""When get instance find nothing."""
pvc_svc = mock.MagicMock()
pvc_svc.get_instance = mock.MagicMock(side_effect=
exceptions.NotFound(0))
pvc_svc.get_instance = \
mock.MagicMock(side_effect=exceptions.NotFound(0))
def pvc_drv_init_instance_not_found(self):
"""A fake init to replace PowerVCDriver.__init__."""
@ -668,13 +664,13 @@ class TestGetInfo(testtools.TestCase):
# monkey patch
PowerVCDriver.__init__ = mock.MagicMock(return_value=None)
self.pvc_drv = PowerVCDriver()
#restore from monkey patch, no need to wait until tearDown
# restore from monkey patch, no need to wait until tearDown
PowerVCDriver.__init__ = pvcdrv_init_copy
def test_get_info_success(self):
"""When everything is fine in the main path."""
self.pvc_drv.get_instance = mock.MagicMock(return_value=
self.pvc_instance)
self.pvc_drv.get_instance = \
mock.MagicMock(return_value=self.pvc_instance)
self.assertEqual(self.pvc_drv.get_info(self.os_instance),
{'state': 1,
'max_mem': 8192,
@ -686,8 +682,8 @@ class TestGetInfo(testtools.TestCase):
def test_get_info_instance_not_found_0(self):
"""When any exception occurred during fetch PVC LPAR instance."""
self.pvc_drv.get_instance = mock.MagicMock(side_effect=
exception.NotFound())
self.pvc_drv.get_instance = \
mock.MagicMock(side_effect=exception.NotFound())
self.assertRaises(exception.NotFound,
self.pvc_drv.get_info,
self.os_instance)