diff --git a/bin/nova-manage b/bin/nova-manage
index 46cb5b1bbff7..f2d945f1a7e9 100755
--- a/bin/nova-manage
+++ b/bin/nova-manage
@@ -1746,7 +1746,7 @@ class StorageManagerCommands(object):
                    flav['description'])
 
     def flavor_create(self, label, desc):
-        # TODO flavor name must be unique
+        # TODO(renukaapte) flavor name must be unique
         try:
             db.sm_flavor_create(context.get_admin_context(),
                                 dict(label=label,
@@ -1792,7 +1792,7 @@ class StorageManagerCommands(object):
                   b['config_params'],)
 
     def backend_add(self, flavor_label, sr_type, *args):
-        # TODO Add backend_introduce.
+        # TODO(renukaapte) Add backend_introduce.
         ctxt = context.get_admin_context()
         params = dict(map(self._splitfun, args))
         sr_uuid = utils.gen_uuid()
diff --git a/nova/api/ec2/apirequest.py b/nova/api/ec2/apirequest.py
index 8098832999ce..82259136ad88 100644
--- a/nova/api/ec2/apirequest.py
+++ b/nova/api/ec2/apirequest.py
@@ -64,7 +64,7 @@ class APIRequest(object):
             _error = _('Unsupported API request: controller = %(controller)s,'
                        ' action = %(action)s') % locals()
             LOG.exception(_error)
-            # TODO: Raise custom exception, trap in apiserver,
+            # TODO(gundlach): Raise custom exception, trap in apiserver,
             #       and reraise as 400 error.
             raise exception.InvalidRequest()
 
diff --git a/nova/api/openstack/common.py b/nova/api/openstack/common.py
index 719a74bb9a58..040e8be0033a 100644
--- a/nova/api/openstack/common.py
+++ b/nova/api/openstack/common.py
@@ -277,7 +277,7 @@ def check_img_metadata_quota_limit(context, metadata):
 
 
 def dict_to_query_str(params):
-    # TODO: we should just use urllib.urlencode instead of this
+    # TODO(throughnothing): we should just use urllib.urlencode instead of this
     # But currently we don't work with urlencoded url's
     param_str = ""
     for key, val in params.iteritems():
diff --git a/nova/api/openstack/compute/contrib/virtual_storage_arrays.py b/nova/api/openstack/compute/contrib/virtual_storage_arrays.py
index 168f82605717..6c8a58c21c2d 100644
--- a/nova/api/openstack/compute/contrib/virtual_storage_arrays.py
+++ b/nova/api/openstack/compute/contrib/virtual_storage_arrays.py
@@ -596,7 +596,7 @@ class VsaVCController(servers.Controller):
     def __init__(self):
         self.vsa_api = vsa_api.API()
         self.compute_api = compute.API()
-        self.vsa_id = None  # VP-TODO: temporary ugly hack
+        self.vsa_id = None  # VP-TODO(vladimir.p): temporary ugly hack
         super(VsaVCController, self).__init__()
 
     def _get_servers(self, req, is_detail):
@@ -624,7 +624,7 @@ class VsaVCController(servers.Controller):
 
         LOG.audit(_("Index instances for VSA %s"), vsa_id)
 
-        self.vsa_id = vsa_id  # VP-TODO: temporary ugly hack
+        self.vsa_id = vsa_id  # VP-TODO(vladimir.p): temporary ugly hack
         result = super(VsaVCController, self).detail(req)
         self.vsa_id = None
         return result
diff --git a/nova/image/glance.py b/nova/image/glance.py
index aa6cee83ce72..93e6cf273028 100644
--- a/nova/image/glance.py
+++ b/nova/image/glance.py
@@ -322,7 +322,7 @@ class GlanceImageService(object):
         # NOTE(parthi): only allow image deletions if the user
         # is a member of the project owning the image, in case of
         # setup without keystone
-        # TODO Currently this access control breaks if
+        # TODO(parthi): Currently this access control breaks if
         # 1. Image is not owned by a project
         # 2. Deleting user is not bound a project
         properties = image_meta['properties']
diff --git a/nova/tests/baremetal/test_proxy_bare_metal.py b/nova/tests/baremetal/test_proxy_bare_metal.py
index 30bc3595b679..3230438004f5 100644
--- a/nova/tests/baremetal/test_proxy_bare_metal.py
+++ b/nova/tests/baremetal/test_proxy_bare_metal.py
@@ -261,7 +261,7 @@ class ProxyBareMetalTestCase(test.TestCase):
         # Code under test
         conn = proxy.get_connection(True)
 
-        # TODO: this is not a very good fake instance
+        # TODO(mikalstill): this is not a very good fake instance
         info = conn.get_info({'name': 'instance-00000001'})
 
         # Expected values
diff --git a/nova/tests/rpc/common.py b/nova/tests/rpc/common.py
index 4ccc8ef6cea8..87cb522c61d9 100644
--- a/nova/tests/rpc/common.py
+++ b/nova/tests/rpc/common.py
@@ -132,7 +132,8 @@ class BaseRpcTestCase(test.TestCase):
             """Calls echo in the passed queue"""
             LOG.debug(_("Nested received %(queue)s, %(value)s")
                       % locals())
-            # TODO: so, it will replay the context and use the same REQID?
+            # TODO(comstud):
+            # so, it will replay the context and use the same REQID?
             # that's bizarre.
             ret = self.rpc.call(context,
                                 queue,
diff --git a/nova/tests/test_auth.py b/nova/tests/test_auth.py
index 242da09d9ef1..6549d6e96cd5 100644
--- a/nova/tests/test_auth.py
+++ b/nova/tests/test_auth.py
@@ -420,5 +420,5 @@ class AuthManagerDbTestCase(_AuthManagerBaseTestCase):
 
 
 if __name__ == "__main__":
-    # TODO: Implement use_fake as an option
+    # TODO(anotherjesse): Implement use_fake as an option
     unittest.main()
diff --git a/nova/virt/libvirt/connection.py b/nova/virt/libvirt/connection.py
index 9a904d925bcb..83be6963d5fe 100644
--- a/nova/virt/libvirt/connection.py
+++ b/nova/virt/libvirt/connection.py
@@ -961,7 +961,7 @@ class LibvirtConnection(driver.ComputeDriver):
         def get_vnc_port_for_instance(instance_name):
             virt_dom = self._lookup_by_name(instance_name)
             xml = virt_dom.XMLDesc(0)
-            # TODO: use etree instead of minidom
+            # TODO(sleepsonthefloor): use etree instead of minidom
             dom = minidom.parseString(xml)
 
             for graphic in dom.getElementsByTagName('graphics'):
diff --git a/nova/virt/xenapi/fake.py b/nova/virt/xenapi/fake.py
index 0b2f1fef2d59..232828e48106 100644
--- a/nova/virt/xenapi/fake.py
+++ b/nova/virt/xenapi/fake.py
@@ -436,7 +436,7 @@ class SessionBase(object):
         return
 
     def PIF_get_all_records_where(self, _1, _2):
-        # TODO (salvatore-orlando): filter table on _2
+        # TODO(salvatore-orlando): filter table on _2
         return _db_content['PIF']
 
     def VM_get_xenstore_data(self, _1, vm_ref):
diff --git a/nova/virt/xenapi/host.py b/nova/virt/xenapi/host.py
index 141700500d02..fb12b374e9e0 100644
--- a/nova/virt/xenapi/host.py
+++ b/nova/virt/xenapi/host.py
@@ -192,7 +192,7 @@ def _host_find(context, session, src, dst):
     """
     # NOTE: this would be a lot simpler if nova-compute stored
    # FLAGS.host in the XenServer host's other-config map.
-    # TODO: improve according the note above
+    # TODO(armando-migliaccio): improve according the note above
     aggregate = db.aggregate_get_by_host(context, src)
     uuid = session.call_xenapi('host.get_record', dst)['uuid']
     for compute_host, host_uuid in aggregate.metadetails.iteritems():
diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py
index eb2022ab5b49..0d5322e11ee7 100644
--- a/nova/virt/xenapi/vmops.py
+++ b/nova/virt/xenapi/vmops.py
@@ -1442,7 +1442,7 @@ class VMOps(object):
 
     def get_console_output(self, instance):
         """Return snapshot of console."""
-        # TODO: implement this to fix pylint!
+        # TODO(armando-migliaccio): implement this to fix pylint!
         return 'FAKE CONSOLE OUTPUT of instance'
 
     def get_vnc_console(self, instance):
diff --git a/nova/volume/xensm.py b/nova/volume/xensm.py
index 128df98f9811..bf390cafb911 100644
--- a/nova/volume/xensm.py
+++ b/nova/volume/xensm.py
@@ -117,7 +117,7 @@ class XenSMDriver(nova.volume.driver.VolumeDriver):
         """Setup includes creating or introducing storage repos
            existing in the database and destroying deleted ones."""
 
-        # TODO purge storage repos
+        # TODO(renukaapte) purge storage repos
         self.ctxt = ctxt
         self._create_storage_repos(ctxt)
 
@@ -127,7 +127,7 @@ class XenSMDriver(nova.volume.driver.VolumeDriver):
 
         # For now the scheduling logic will be to try to fit the volume in
         # the first available backend.
-        # TODO better scheduling once APIs are in place
+        # TODO(renukaapte) better scheduling once APIs are in place
         sm_vol_rec = None
         backends = self.db.sm_backend_conf_get_all(self.ctxt)
         for backend in backends:
@@ -191,7 +191,6 @@ class XenSMDriver(nova.volume.driver.VolumeDriver):
 
     def create_export(self, context, volume):
         """Exports the volume."""
-        # !!! TODO
         pass
 
     def remove_export(self, context, volume):
diff --git a/nova/vsa/api.py b/nova/vsa/api.py
index 1a03dfb1b943..dbe3607eac8d 100644
--- a/nova/vsa/api.py
+++ b/nova/vsa/api.py
@@ -295,7 +295,8 @@ class API(base.Base):
 
         vc_count = kwargs.get('vc_count', None)
         if vc_count is not None:
-            # VP-TODO: This request may want to update number of VCs
+            # VP-TODO(vladimir.p):
+            # This request may want to update number of VCs
             # Get number of current VCs and add/delete VCs appropriately
             vsa = self.get(context, vsa_id)
             vc_count = int(vc_count)
@@ -317,13 +318,13 @@ class API(base.Base):
                 add_cnt = vc_count - old_vc_count
                 LOG.debug(_("Adding %(add_cnt)s VCs to VSA %(vsa_name)s."),
                           locals())
-                # VP-TODO: actual code for adding new VCs
+                # VP-TODO(vladimir.p): actual code for adding new VCs
 
             elif vc_count < old_vc_count:
                 del_cnt = old_vc_count - vc_count
                 LOG.debug(_("Deleting %(del_cnt)s VCs from VSA %(vsa_name)s."),
                           locals())
-                # VP-TODO: actual code for deleting extra VCs
+                # VP-TODO(vladimir.p): actual code for deleting extra VCs
 
     def _force_volume_delete(self, ctxt, volume):
         """Delete a volume, bypassing the check that it must be available."""