1) Update a few comments where whitespace is missing after '#'

2) Update document so that the copyright notice doesn't appear in the generated document
3) Now using self.flags(...) instead of setting flags like FLAGS.vmwareapi_host_username by direct assignment (a short sketch of this pattern follows this list).
4) Added the missing double quote at the end of a string in vim_util.py
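
For context on item 3, here is a minimal, self-contained sketch of the flags-override pattern (the _Flags stand-in and FlagsTestCase below are invented for illustration and are not part of nova or of this commit; nova's test.TestCase provides a comparable flags() helper, which is what the test diff below switches to). The point: overrides made through self.flags(...) are recorded and rolled back in tearDown, while direct FLAGS assignments persist across tests.

import unittest


class _Flags(object):
    """Stand-in for nova's global FLAGS object (illustration only)."""
    vmwareapi_host_ip = None
    vmwareapi_host_username = None
    vmwareapi_host_password = None


FLAGS = _Flags()


class FlagsTestCase(unittest.TestCase):
    """Hypothetical base class showing why self.flags() beats direct
    FLAGS assignment: every override is remembered and undone in
    tearDown, so values cannot leak between tests."""

    def setUp(self):
        super(FlagsTestCase, self).setUp()
        self._overridden = []

    def flags(self, **kw):
        for name, value in kw.items():
            # Remember the old value before overriding it.
            self._overridden.append((name, getattr(FLAGS, name)))
            setattr(FLAGS, name, value)

    def tearDown(self):
        # Restore in reverse order so repeated overrides of the same
        # flag unwind correctly.
        for name, value in reversed(self._overridden):
            setattr(FLAGS, name, value)
        super(FlagsTestCase, self).tearDown()

    def test_override_is_scoped_to_this_test(self):
        self.flags(vmwareapi_host_ip='test_url')
        self.assertEqual(FLAGS.vmwareapi_host_ip, 'test_url')


if __name__ == '__main__':
    unittest.main()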
sateesh 2011-03-17 20:13:48 +05:30
parent f5ad4125d0
commit cbcda1ec46
6 changed files with 39 additions and 40 deletions

View File

@@ -1,5 +1,4 @@
 ..
       Copyright (c) 2010 Citrix Systems, Inc.
       Copyright 2010 OpenStack LLC.

View File

@@ -43,15 +43,15 @@ class VMWareAPIVMTestCase(test.TestCase):
     def setUp(self):
         super(VMWareAPIVMTestCase, self).setUp()
+        self.flags(vmwareapi_host_ip='test_url',
+                   vmwareapi_host_username='test_username',
+                   vmwareapi_host_password='test_pass')
         self.manager = manager.AuthManager()
         self.user = self.manager.create_user('fake', 'fake', 'fake',
                                              admin=True)
         self.project = self.manager.create_project('fake', 'fake', 'fake')
         self.network = utils.import_object(FLAGS.network_manager)
         self.stubs = stubout.StubOutForTesting()
-        FLAGS.vmwareapi_host_ip = 'test_url'
-        FLAGS.vmwareapi_host_username = 'test_username'
-        FLAGS.vmwareapi_host_password = 'test_pass'
         vmwareapi_fake.reset()
         db_fakes.stub_out_db_instance_api(self.stubs)
         stubs.set_stubs(self.stubs)

View File

@@ -48,8 +48,8 @@ def log_db_contents(msg=None):
 def reset():
     """Resets the db contents."""
     for c in _CLASSES:
-        #We fake the datastore by keeping the file references as a list of
-        #names in the db
+        # We fake the datastore by keeping the file references as a list of
+        # names in the db
         if c == 'files':
             _db_content[c] = []
         else:
@@ -206,7 +206,7 @@ class VirtualMachine(ManagedObject):
         setting of the Virtual Machine object.
         """
         try:
-            #Case of Reconfig of VM to attach disk
+            # Case of Reconfig of VM to attach disk
             controller_key = val.deviceChange[1].device.controllerKey
             filename = val.deviceChange[1].device.backing.fileName
@@ -223,7 +223,7 @@ class VirtualMachine(ManagedObject):
             self.set("config.hardware.device", [disk, controller])
         except Exception:
-            #Case of Reconfig of VM to set extra params
+            # Case of Reconfig of VM to set extra params
             self.set("config.extraConfig", val.extraConfig)
@@ -406,14 +406,14 @@ def _remove_file(file_path):
     """Removes a file reference from the db."""
     if _db_content.get("files", None) is None:
         raise exception.NotFound(_("No files have been added yet"))
-    #Check if the remove is for a single file object or for a folder
+    # Check if the remove is for a single file object or for a folder
     if file_path.find(".vmdk") != -1:
         if file_path not in _db_content.get("files"):
             raise exception.NotFound(_("File- '%s' is not there in the "
                                        "datastore") % file_path)
         _db_content.get("files").remove(file_path)
     else:
-        #Removes the files in the folder and the folder too from the db
+        # Removes the files in the folder and the folder too from the db
         for file in _db_content.get("files"):
             if file.find(file_path) != -1:
                 try:
@@ -639,15 +639,15 @@ class FakeVim(object):
         for obj in objs:
             try:
                 obj_ref = obj.obj
-                #This means that we are doing a search for the managed
-                #dataobects of the type in the inventory
+                # This means that we are doing a search for the managed
+                # dataobjects of the type in the inventory
                 if obj_ref == "RootFolder":
                     for mdo_ref in _db_content[type]:
                         mdo = _db_content[type][mdo_ref]
-                        #Create a temp Managed object which has the same ref
-                        #as the parent object and copies just the properties
-                        #asked for. We need .obj along with the propSet of
-                        #just the properties asked for
+                        # Create a temp Managed object which has the same ref
+                        # as the parent object and copies just the properties
+                        # asked for. We need .obj along with the propSet of
+                        # just the properties asked for
                         temp_mdo = ManagedObject(mdo.objName, mdo.obj)
                         for prop in properties:
                             temp_mdo.set(prop, mdo.get(prop))

View File

@@ -39,9 +39,9 @@ class NetworkHelper:
         hostsystems = session._call_method(vim_util, "get_objects",
                                            "HostSystem", ["network"])
         vm_networks_ret = hostsystems[0].propSet[0].val
-        #Meaning there are no networks on the host. suds responds with a ""
-        #in the parent property field rather than a [] in the
-        #ManagedObjectRefernce property field of the parent
+        # Meaning there are no networks on the host. suds responds with a ""
+        # in the parent property field rather than a [] in the
+        # ManagedObjectRefernce property field of the parent
         if not vm_networks_ret:
             return None
         vm_networks = vm_networks_ret.ManagedObjectReference
@@ -59,18 +59,18 @@ class NetworkHelper:
         Gets the vswitch associated with the physical network adapter
         with the name supplied.
         """
-        #Get the list of vSwicthes on the Host System
+        # Get the list of vSwicthes on the Host System
         host_mor = session._call_method(vim_util, "get_objects",
                                         "HostSystem")[0].obj
         vswitches_ret = session._call_method(vim_util,
                             "get_dynamic_property", host_mor,
                             "HostSystem", "config.network.vswitch")
-        #Meaning there are no vSwitches on the host. Shouldn't be the case,
-        #but just doing code check
+        # Meaning there are no vSwitches on the host. Shouldn't be the case,
+        # but just doing code check
         if not vswitches_ret:
             return
         vswitches = vswitches_ret.HostVirtualSwitch
-        #Get the vSwitch associated with the network adapter
+        # Get the vSwitch associated with the network adapter
         for elem in vswitches:
             try:
                 for nic_elem in elem.pnic:
@@ -87,7 +87,7 @@ class NetworkHelper:
         physical_nics_ret = session._call_method(vim_util,
                                 "get_dynamic_property", host_net_system_mor,
                                 "HostNetworkSystem", "networkInfo.pnic")
-        #Meaning there are no physical nics on the host
+        # Meaning there are no physical nics on the host
         if not physical_nics_ret:
             return False
         physical_nics = physical_nics_ret.PhysicalNic
@@ -139,11 +139,11 @@ class NetworkHelper:
                                   "AddPortGroup", network_system_mor,
                                   portgrp=add_prt_grp_spec)
         except error_util.VimFaultException, exc:
-            #There can be a race condition when two instances try
-            #adding port groups at the same time. One succeeds, then
-            #the other one will get an exception. Since we are
-            #concerned with the port group being created, which is done
-            #by the other call, we can ignore the exception.
+            # There can be a race condition when two instances try
+            # adding port groups at the same time. One succeeds, then
+            # the other one will get an exception. Since we are
+            # concerned with the port group being created, which is done
+            # by the other call, we can ignore the exception.
             if error_util.FAULT_ALREADY_EXISTS not in exc.fault_list:
                 raise exception.Error(exc)
         LOG.debug(_("Created Port Group with name %s on "

View File

@@ -29,7 +29,7 @@ from suds.sudsobject import Property
 from nova import flags
 from nova.virt.vmwareapi import error_util
-RESP_NOT_XML_ERROR = 'Response is "text/html", not "text/xml'
+RESP_NOT_XML_ERROR = 'Response is "text/html", not "text/xml"'
 CONN_ABORT_ERROR = 'Software caused connection abort'
 ADDRESS_IN_USE_ERROR = 'Address already in use'

View File

@@ -45,46 +45,46 @@ def build_recursive_traversal_spec(client_factory):
     """
     visit_folders_select_spec = build_selection_spec(client_factory,
                                                      "visitFolders")
-    #For getting to hostFolder from datacnetr
+    # For getting to hostFolder from datacenter
     dc_to_hf = build_traversal_spec(client_factory, "dc_to_hf", "Datacenter",
                                     "hostFolder", False,
                                     [visit_folders_select_spec])
-    #For getting to vmFolder from datacenter
+    # For getting to vmFolder from datacenter
     dc_to_vmf = build_traversal_spec(client_factory, "dc_to_vmf", "Datacenter",
                                      "vmFolder", False,
                                      [visit_folders_select_spec])
-    #For getting Host System to virtual machine
+    # For getting Host System to virtual machine
     h_to_vm = build_traversal_spec(client_factory, "h_to_vm", "HostSystem",
                                    "vm", False,
                                    [visit_folders_select_spec])
-    #For getting to Host System from Compute Resource
+    # For getting to Host System from Compute Resource
     cr_to_h = build_traversal_spec(client_factory, "cr_to_h",
                                    "ComputeResource", "host", False, [])
-    #For getting to datastore from Compute Resource
+    # For getting to datastore from Compute Resource
     cr_to_ds = build_traversal_spec(client_factory, "cr_to_ds",
                                     "ComputeResource", "datastore", False, [])
     rp_to_rp_select_spec = build_selection_spec(client_factory, "rp_to_rp")
     rp_to_vm_select_spec = build_selection_spec(client_factory, "rp_to_vm")
-    #For getting to resource pool from Compute Resource
+    # For getting to resource pool from Compute Resource
     cr_to_rp = build_traversal_spec(client_factory, "cr_to_rp",
                                     "ComputeResource", "resourcePool", False,
                                     [rp_to_rp_select_spec, rp_to_vm_select_spec])
-    #For getting to child res pool from the parent res pool
+    # For getting to child res pool from the parent res pool
     rp_to_rp = build_traversal_spec(client_factory, "rp_to_rp", "ResourcePool",
                                     "resourcePool", False,
                                     [rp_to_rp_select_spec, rp_to_vm_select_spec])
-    #For getting to Virtual Machine from the Resource Pool
+    # For getting to Virtual Machine from the Resource Pool
     rp_to_vm = build_traversal_spec(client_factory, "rp_to_vm", "ResourcePool",
                                     "vm", False,
                                     [rp_to_rp_select_spec, rp_to_vm_select_spec])
-    #Get the assorted traversal spec which takes care of the objects to
-    #be searched for from the root folder
+    # Get the assorted traversal spec which takes care of the objects to
+    # be searched for from the root folder
     traversal_spec = build_traversal_spec(client_factory, "visitFolders",
                                           "Folder", "childEntity", False,
                                           [visit_folders_select_spec, dc_to_hf,