From 58c732b0dc4523f818cdca1c7278350ed3ca6802 Mon Sep 17 00:00:00 2001
From: Boris Pavlovic
Date: Mon, 25 Sep 2017 14:15:57 -0700
Subject: [PATCH] Improve Rally Logging (part 3)

- Remove translations

  Nobody is using translations for Rally, and it is unlikely that anybody
  ever will. The target audience for Rally is developers and operators,
  who usually know English well. Translations are a waste of resources,
  a performance cost (every message goes through _()) and extra
  complexity (one more thing contributors need to know).

- Pass already formatted strings to the log

  Lazy log formatting is harmful: if the format string is wrong, the call
  does not fail; it only writes an error to the logs, and the traceback
  information is lost, which makes such problems very hard to fix. The
  log wrapper no longer allows LOG to format strings, and all call sites
  are fixed accordingly.

- Improve logging of exceptions

  LOG.exception() already logs the exception, so it is a bad idea to pass
  str(e) to it. Instead we should provide a clear description of what
  happened. Several places now log either a warning or the full exception
  depending on the log level; a few places simply use LOG.exception.

- Some log messages were improved and simplified

Change-Id: Ie3f7b4e00c804b0aeee6dc117a98a24efa240340
---
 rally/plugins/openstack/cleanup/manager.py | 44 ++++++------
 rally/plugins/openstack/cleanup/resources.py | 3 +-
 .../plugins/openstack/context/api_versions.py | 12 ++--
 .../openstack/context/ceilometer/samples.py | 7 +-
 .../openstack/context/cinder/volume_types.py | 2 +-
 .../plugins/openstack/context/ec2/servers.py | 12 ++--
 .../openstack/context/glance/images.py | 17 ++---
 .../openstack/context/keystone/roles.py | 15 ++--
 .../openstack/context/keystone/users.py | 17 +++--
 .../context/manila/manila_share_networks.py | 22 +++---
 .../openstack/context/network/allow_ssh.py | 8 +--
 .../openstack/context/network/networks.py | 4 +-
 .../openstack/context/neutron/lbaas.py | 7 +-
 .../plugins/openstack/context/nova/flavors.py | 11 +--
 .../plugins/openstack/context/nova/servers.py | 14 ++--
 .../openstack/context/quotas/quotas.py | 9 ++-
 .../context/sahara/sahara_cluster.py | 14 ++--
 .../openstack/context/sahara/sahara_image.py | 4 +-
 .../context/sahara/sahara_job_binaries.py | 2 +-
 .../openstack/context/swift/objects.py | 21 +++---
 .../openstack/context/vm/custom_image.py | 12 ++--
 .../plugins/openstack/hook/fault_injection.py | 2 +-
 .../openstack/scenarios/cinder/utils.py | 8 +--
 .../openstack/scenarios/glance/utils.py | 5 +-
 .../plugins/openstack/scenarios/heat/utils.py | 4 +-
 .../openstack/scenarios/keystone/utils.py | 5 +-
 .../openstack/scenarios/neutron/utils.py | 15 ++--
 .../openstack/scenarios/nova/flavors.py | 5 +-
 .../plugins/openstack/scenarios/nova/utils.py | 11 ++-
 .../openstack/scenarios/sahara/clusters.py | 4 +-
 .../openstack/scenarios/sahara/jobs.py | 6 +-
 .../openstack/scenarios/sahara/utils.py | 27 ++++---
 rally/plugins/openstack/scenarios/vm/utils.py | 7 +-
 .../plugins/openstack/scenarios/vm/vmtasks.py | 2 +-
 .../services/storage/cinder_common.py | 3 +-
 .../openstack/verification/tempest/config.py | 2 +-
 .../openstack/verification/tempest/context.py | 71 +++++++++----------
 .../openstack/verification/tempest/manager.py | 5 +-
 rally/plugins/openstack/wrappers/network.py | 9 ++-
 .../plugins/openstack/cleanup/test_manager.py | 2 +-
 .../openstack/context/glance/test_images.py | 29 +++-----
 41 files changed, 216 insertions(+), 263 deletions(-)

diff --git a/rally/plugins/openstack/cleanup/manager.py b/rally/plugins/openstack/cleanup/manager.py
index 688edd1016..44d80e3ec7 100644
--- a/rally/plugins/openstack/cleanup/manager.py +++ b/rally/plugins/openstack/cleanup/manager.py @@ -16,7 +16,6 @@ import time from rally.common import broker -from rally.common.i18n import _ from rally.common import logging from rally.common.plugin import discover from rally.common.plugin import plugin @@ -79,19 +78,19 @@ class SeekAndDestroy(object): } LOG.debug( - "Deleting %(service)s %(resource)s object %(name)s (%(uuid)s)", - msg_kw) + "Deleting %(service)s.%(resource)s object %(name)s (%(uuid)s)" + % msg_kw) try: rutils.retry(resource._max_attempts, resource.delete) except Exception as e: - msg_kw["reason"] = e - LOG.warning( - _("Resource deletion failed, max retries exceeded for " - "%(service)s.%(resource)s: %(uuid)s. Reason: %(reason)s") - % msg_kw) + msg = ("Resource deletion failed, max retries exceeded for " + "%(service)s.%(resource)s: %(uuid)s.") % msg_kw + if logging.is_debug(): - LOG.exception(e) + LOG.exception(msg) + else: + LOG.warning("%(msg)s Reason: %(e)s" % {"msg": msg, "e": e}) else: started = time.time() failures_count = 0 @@ -100,11 +99,10 @@ class SeekAndDestroy(object): if resource.is_deleted(): return except Exception as e: - LOG.warning( - _("Seems like %s.%s.is_deleted(self) method is broken " - "It shouldn't raise any exceptions.") + LOG.exception( + "Seems like %s.%s.is_deleted(self) method is broken " + "It shouldn't raise any exceptions." % (resource.__module__, type(resource).__name__)) - LOG.exception(e) # NOTE(boris-42): Avoid LOG spamming in case of bad # is_deleted() method @@ -115,9 +113,8 @@ class SeekAndDestroy(object): finally: rutils.interruptable_sleep(resource._interval) - LOG.warning(_("Resource deletion failed, timeout occurred for " - "%(service)s.%(resource)s: %(uuid)s.") - % msg_kw) + LOG.warning("Resource deletion failed, timeout occurred for " + "%(service)s.%(resource)s: %(uuid)s." % msg_kw) def _publisher(self, queue): """Publisher for deletion jobs. @@ -135,12 +132,11 @@ class SeekAndDestroy(object): try: for raw_resource in rutils.retry(3, manager.list): queue.append((admin, user, raw_resource)) - except Exception as e: - LOG.warning( - _("Seems like %s.%s.list(self) method is broken. " - "It shouldn't raise any exceptions.") + except Exception: + LOG.exception( + "Seems like %s.%s.list(self) method is broken. " + "It shouldn't raise any exceptions." % (manager.__module__, type(manager).__name__)) - LOG.exception(e) if self.admin and (not self.users or self.manager_cls._perform_for_admin_only): @@ -280,9 +276,9 @@ def cleanup(names=None, admin_required=None, admin=None, users=None, rutils.RandomNameGeneratorMixin): resource_classes.append(superclass) for manager in find_resource_managers(names, admin_required): - LOG.debug("Cleaning up %(service)s %(resource)s objects", - {"service": manager._service, - "resource": manager._resource}) + LOG.debug("Cleaning up %(service)s %(resource)s objects" + % {"service": manager._service, + "resource": manager._resource}) SeekAndDestroy(manager, admin, users, api_versions=api_versions, resource_classes=resource_classes, diff --git a/rally/plugins/openstack/cleanup/resources.py b/rally/plugins/openstack/cleanup/resources.py index 2f5dac0c28..181c23798a 100644 --- a/rally/plugins/openstack/cleanup/resources.py +++ b/rally/plugins/openstack/cleanup/resources.py @@ -445,8 +445,7 @@ class NeutronPort(NeutronMixin): except neutron_exceptions.PortNotFoundClient: # Port can be already auto-deleted, skip silently LOG.debug("Port %s was not deleted. 
Skip silently because " - "port can be already auto-deleted.", - self.id()) + "port can be already auto-deleted." % self.id()) @base.resource("neutron", "subnet", order=next(_neutron_order), diff --git a/rally/plugins/openstack/context/api_versions.py b/rally/plugins/openstack/context/api_versions.py index 9d63814fa0..a6d50715a1 100644 --- a/rally/plugins/openstack/context/api_versions.py +++ b/rally/plugins/openstack/context/api_versions.py @@ -12,7 +12,6 @@ import random -from rally.common.i18n import _ from rally.common import validation from rally import consts from rally import exceptions @@ -239,23 +238,22 @@ class OpenStackAPIVersions(context.Context): services_from_admin = None for client_name, conf in self.config.items(): if "service_type" in conf and conf["service_type"] not in services: - raise exceptions.ValidationError(_( - "There is no service with '%s' type in your environment.") + raise exceptions.ValidationError( + "There is no service with '%s' type in your environment." % conf["service_type"]) elif "service_name" in conf: if not self.context.get("admin", {}).get("credential"): raise exceptions.ContextSetupFailure( ctx_name=self.get_name(), - msg=_("Setting 'service_name' is allowed" - " only for 'admin' user.")) + msg="Setting 'service_name' is admin only operation.") if not services_from_admin: services_from_admin = dict( [(s.name, s.type) for s in admin_clients.keystone().services.list()]) if conf["service_name"] not in services_from_admin: raise exceptions.ValidationError( - _("There is no '%s' service in your environment") % - conf["service_name"]) + "There is no '%s' service in your environment" + % conf["service_name"]) # TODO(boris-42): Use separate key ["openstack"]["versions"] self.context["config"]["api_versions@openstack"][client_name][ diff --git a/rally/plugins/openstack/context/ceilometer/samples.py b/rally/plugins/openstack/context/ceilometer/samples.py index e3ae47bc52..7c63b2c457 100644 --- a/rally/plugins/openstack/context/ceilometer/samples.py +++ b/rally/plugins/openstack/context/ceilometer/samples.py @@ -16,7 +16,6 @@ import time from six import moves -from rally.common.i18n import _ from rally.common import logging from rally.common import utils as rutils from rally.common import validation @@ -111,12 +110,12 @@ class CeilometerSampleGenerator(context.Context): samples = scenario._create_samples(batch) except Exception: unsuccess += 1 - LOG.warning(_("Failed to store batch %d of Ceilometer samples" - " during context creation") % i) + LOG.warning("Failed to store batch %d of Ceilometer samples" + " during context creation" % i) if unsuccess > batches_allow_lose: raise exceptions.ContextSetupFailure( ctx_name=self.get_name(), - msg=_("Context failed to store too many batches of samples")) + msg="Context failed to store too many batches of samples") return samples diff --git a/rally/plugins/openstack/context/cinder/volume_types.py b/rally/plugins/openstack/context/cinder/volume_types.py index c7aa6942e2..e8e54cf4b9 100644 --- a/rally/plugins/openstack/context/cinder/volume_types.py +++ b/rally/plugins/openstack/context/cinder/volume_types.py @@ -44,7 +44,7 @@ class VolumeTypeGenerator(context.Context): admin_clients, name_generator=self.generate_random_name) self.context["volume_types"] = [] for vtype_name in self.config: - LOG.debug("Creating Cinder volume type %s", vtype_name) + LOG.debug("Creating Cinder volume type %s" % vtype_name) vtype = cinder_service.create_volume_type(vtype_name) self.context["volume_types"].append({"id": vtype.id, "name": 
vtype_name}) diff --git a/rally/plugins/openstack/context/ec2/servers.py b/rally/plugins/openstack/context/ec2/servers.py index b637fc80d3..230ad7c931 100644 --- a/rally/plugins/openstack/context/ec2/servers.py +++ b/rally/plugins/openstack/context/ec2/servers.py @@ -68,8 +68,7 @@ class EC2ServerGenerator(context.Context): for user, tenant_id in rutils.iterate_per_tenants( self.context["users"]): - LOG.debug("Booting servers for tenant %s ", - (user["tenant_id"])) + LOG.debug("Booting servers for tenant %s " % user["tenant_id"]) ec2_scenario = ec2_utils.EC2Scenario({ "user": user, "task": self.context["task"], @@ -77,10 +76,11 @@ class EC2ServerGenerator(context.Context): LOG.debug( "Calling _boot_servers with " - "image_id={image_id} flavor_name={flavor_name} " - "servers_per_tenant={servers_per_tenant}".format( - image_id=image_id, flavor_name=flavor["name"], - servers_per_tenant=self.config["servers_per_tenant"])) + "image_id=%(image_id)s flavor_name=%(flavor_name)s " + "servers_per_tenant=%(servers_per_tenant)s" + % {"image_id": image_id, + "flavor_name": flavor["name"], + "servers_per_tenant": self.config["servers_per_tenant"]}) servers = ec2_scenario._boot_servers( image_id, flavor["name"], self.config["servers_per_tenant"]) diff --git a/rally/plugins/openstack/context/glance/images.py b/rally/plugins/openstack/context/glance/images.py index 240e5d31b5..2686277d58 100644 --- a/rally/plugins/openstack/context/glance/images.py +++ b/rally/plugins/openstack/context/glance/images.py @@ -14,7 +14,6 @@ from oslo_config import cfg -from rally.common.i18n import _ from rally.common import logging from rally.common import utils as rutils from rally.common import validation @@ -132,23 +131,21 @@ class ImageGenerator(context.Context): image_args = self.config.get("image_args", {}) if "image_type" in self.config: - LOG.warning(_("The 'image_type' argument is deprecated " - "since Rally 0.10.0, use disk_format " - "arguments instead.")) + LOG.warning("The 'image_type' argument is deprecated since " + "Rally 0.10.0, use disk_format argument instead") if not disk_format: disk_format = self.config["image_type"] if "image_container" in self.config: - LOG.warning(_("The 'image_container' argument is deprecated " - "since Rally 0.10.0; use container_format " - "arguments instead")) + LOG.warning("The 'image_container' argument is deprecated since " + "Rally 0.10.0; use container_format argument instead") if not container_format: container_format = self.config["image_container"] if image_args: - LOG.warning(_("The 'image_args' argument is deprecated since " - "Rally 0.10.0; specify exact arguments in a root " - "section of context instead.")) + LOG.warning( + "The 'image_args' argument is deprecated since Rally 0.10.0; " + "specify arguments in a root section of context instead") if "is_public" in image_args: if "visibility" not in self.config: diff --git a/rally/plugins/openstack/context/keystone/roles.py b/rally/plugins/openstack/context/keystone/roles.py index 7c86889eaa..13eabd780d 100644 --- a/rally/plugins/openstack/context/keystone/roles.py +++ b/rally/plugins/openstack/context/keystone/roles.py @@ -16,7 +16,6 @@ from oslo_config import cfg from rally.common import broker -from rally.common.i18n import _ from rally.common import logging from rally.common import validation from rally import consts @@ -61,8 +60,8 @@ class RoleGenerator(context.Context): if str(def_role.name) == context_role: return def_role else: - raise exceptions.NotFoundException(_( - "There is no role with name `%s`") % 
context_role) + raise exceptions.NotFoundException( + "There is no role with name `%s`" % context_role) def _get_consumer(self, func_name): def consume(cache, args): @@ -85,10 +84,10 @@ class RoleGenerator(context.Context): role = self._get_role_object(context_role) roles_dict[role.id] = role.name LOG.debug("Adding role %(role_name)s having ID %(role_id)s " - "to all users using %(threads)s threads", - {"role_name": role.name, - "role_id": role.id, - "threads": threads}) + "to all users using %(threads)s threads" + % {"role_name": role.name, + "role_id": role.id, + "threads": threads}) for user in self.context["users"]: args = (role.id, user["id"], user["tenant_id"]) queue.append(args) @@ -102,7 +101,7 @@ class RoleGenerator(context.Context): def publish(queue): for role_id in self.context["roles"]: - LOG.debug("Removing role %s from all users", role_id) + LOG.debug("Removing role %s from all users" % role_id) for user in self.context["users"]: args = (role_id, user["id"], user["tenant_id"]) queue.append(args) diff --git a/rally/plugins/openstack/context/keystone/users.py b/rally/plugins/openstack/context/keystone/users.py index efc536e72d..b33c8fd715 100644 --- a/rally/plugins/openstack/context/keystone/users.py +++ b/rally/plugins/openstack/context/keystone/users.py @@ -19,7 +19,6 @@ import uuid from oslo_config import cfg from rally.common import broker -from rally.common.i18n import _ from rally.common import logging from rally.common import objects from rally.common import utils as rutils @@ -139,13 +138,13 @@ class UserGenerator(context.Context): use_sg, msg = network.wrap(clients, self).supports_extension( "security-group") if not use_sg: - LOG.debug("Security group context is disabled: %s", msg) + LOG.debug("Security group context is disabled: %s" % msg) return for user, tenant_id in rutils.iterate_per_tenants( self.context["users"]): with logging.ExceptionLogger( - LOG, _("Unable to delete default security group")): + LOG, "Unable to delete default security group"): uclients = osclients.Clients(user["credential"]) security_groups = uclients.neutron().list_security_groups() default = [sg for sg in security_groups["security_groups"] @@ -264,18 +263,18 @@ class UserGenerator(context.Context): """Create tenants and users, using the broker pattern.""" threads = self.config["resource_management_workers"] - LOG.debug("Creating %(tenants)d tenants using %(threads)s threads", - {"tenants": self.config["tenants"], "threads": threads}) + LOG.debug("Creating %(tenants)d tenants using %(threads)s threads" + % {"tenants": self.config["tenants"], "threads": threads}) self.context["tenants"] = self._create_tenants() if len(self.context["tenants"]) < self.config["tenants"]: raise exceptions.ContextSetupFailure( ctx_name=self.get_name(), - msg=_("Failed to create the requested number of tenants.")) + msg="Failed to create the requested number of tenants.") users_num = self.config["users_per_tenant"] * self.config["tenants"] - LOG.debug("Creating %(users)d users using %(threads)s threads", - {"users": users_num, "threads": threads}) + LOG.debug("Creating %(users)d users using %(threads)s threads" + % {"users": users_num, "threads": threads}) self.context["users"] = self._create_users() for user in self.context["users"]: self.context["tenants"][user["tenant_id"]]["users"].append(user) @@ -283,7 +282,7 @@ class UserGenerator(context.Context): if len(self.context["users"]) < users_num: raise exceptions.ContextSetupFailure( ctx_name=self.get_name(), - msg=_("Failed to create the requested number of 
users.")) + msg="Failed to create the requested number of users.") def use_existing_users(self): LOG.debug("Using existing users") diff --git a/rally/plugins/openstack/context/manila/manila_share_networks.py b/rally/plugins/openstack/context/manila/manila_share_networks.py index 32ae309705..b5913ea0e1 100644 --- a/rally/plugins/openstack/context/manila/manila_share_networks.py +++ b/rally/plugins/openstack/context/manila/manila_share_networks.py @@ -15,7 +15,6 @@ from oslo_config import cfg -from rally.common.i18n import _ from rally.common import logging from rally.common import utils from rally.common import validation @@ -89,9 +88,9 @@ class ShareNetworks(context.Context): def _setup_for_existing_users(self): if (self.config["use_share_networks"] and not self.config["share_networks"]): - msg = _("Usage of share networks was enabled but for deployment " - "with existing users share networks also should be " - "specified via arg 'share_networks'") + msg = ("Usage of share networks was enabled but for deployment " + "with existing users share networks also should be " + "specified via arg 'share_networks'") raise exceptions.ContextSetupFailure( ctx_name=self.get_name(), msg=msg) @@ -108,8 +107,8 @@ class ShareNetworks(context.Context): break break else: - msg = _("Provided tenant Name or ID '%s' was not found in " - "existing tenants.") % tenant_name_or_id + msg = ("Provided tenant Name or ID '%s' was not found in " + "existing tenants.") % tenant_name_or_id raise exceptions.ContextSetupFailure( ctx_name=self.get_name(), msg=msg) self.context["tenants"][tenant_id][CONTEXT_NAME] = {} @@ -131,9 +130,9 @@ class ShareNetworks(context.Context): if sn_name_or_id in (sn.id, sn.name): break else: - msg = _("Specified share network '%(sn)s' does not " - "exist for tenant '%(tenant_id)s'") % { - "sn": sn_name_or_id, "tenant_id": tenant_id} + msg = ("Specified share network '%(sn)s' does not " + "exist for tenant '%(tenant_id)s'" + % {"sn": sn_name_or_id, "tenant_id": tenant_id}) raise exceptions.ContextSetupFailure( ctx_name=self.get_name(), msg=msg) @@ -178,9 +177,8 @@ class ShareNetworks(context.Context): data["neutron_net_id"] = network["id"] data["neutron_subnet_id"] = network["subnets"][0] else: - LOG.warning(_( - "Can not determine network service provider. " - "Share network will have no data.")) + LOG.warning("Can't determine network service provider." + " Share network will have no data.") _setup_share_network(tenant_id, data) else: _setup_share_network(tenant_id, data) diff --git a/rally/plugins/openstack/context/network/allow_ssh.py b/rally/plugins/openstack/context/network/allow_ssh.py index 11cf462126..c4e1edb565 100644 --- a/rally/plugins/openstack/context/network/allow_ssh.py +++ b/rally/plugins/openstack/context/network/allow_ssh.py @@ -13,7 +13,6 @@ # License for the specific language governing permissions and limitations # under the License. 
-from rally.common.i18n import _ from rally.common import logging from rally.common import utils from rally.common import validation @@ -97,7 +96,7 @@ class AllowSSH(context.Context): self, config=self.config) use_sg, msg = net_wrapper.supports_extension("security-group") if not use_sg: - LOG.info(_("Security group context is disabled: %s") % msg) + LOG.info("Security group context is disabled: %s" % msg) return secgroup_name = self.generate_random_name() @@ -109,7 +108,8 @@ class AllowSSH(context.Context): for user, tenant_id in utils.iterate_per_tenants( self.context["users"]): with logging.ExceptionLogger( - LOG, _("Unable to delete secgroup: %s.") % - user["secgroup"]["name"]): + LOG, + "Unable to delete security group: %s." + % user["secgroup"]["name"]): clients = osclients.Clients(user["credential"]) clients.neutron().delete_security_group(user["secgroup"]["id"]) diff --git a/rally/plugins/openstack/context/network/networks.py b/rally/plugins/openstack/context/network/networks.py index 41196df8eb..d83f245e08 100644 --- a/rally/plugins/openstack/context/network/networks.py +++ b/rally/plugins/openstack/context/network/networks.py @@ -13,7 +13,6 @@ # License for the specific language governing permissions and limitations # under the License. -from rally.common.i18n import _ from rally.common import logging from rally.common import utils from rally.common import validation @@ -107,6 +106,5 @@ class Network(context.Context): for network in tenant_ctx.get("networks", []): with logging.ExceptionLogger( LOG, - _("Failed to delete network for tenant %s") - % tenant_id): + "Failed to delete network for tenant %s" % tenant_id): net_wrapper.delete_network(network) diff --git a/rally/plugins/openstack/context/neutron/lbaas.py b/rally/plugins/openstack/context/neutron/lbaas.py index 8619e90126..347d488052 100644 --- a/rally/plugins/openstack/context/neutron/lbaas.py +++ b/rally/plugins/openstack/context/neutron/lbaas.py @@ -10,7 +10,6 @@ # License for the specific language governing permissions and limitations # under the License. 
-from rally.common.i18n import _ from rally.common import logging from rally.common import utils from rally.common import validation @@ -86,8 +85,8 @@ class Lbaas(context.Context): for pool in network.get("lb_pools", []): with logging.ExceptionLogger( LOG, - _("Failed to delete pool %(pool)s for tenant " - "%(tenant)s") % {"pool": pool["pool"]["id"], - "tenant": tenant_id}): + "Failed to delete pool %(pool)s for tenant " + "%(tenant)s" % {"pool": pool["pool"]["id"], + "tenant": tenant_id}): if self.config["lbaas_version"] == 1: net_wrapper.delete_v1_pool(pool["pool"]["id"]) diff --git a/rally/plugins/openstack/context/nova/flavors.py b/rally/plugins/openstack/context/nova/flavors.py index 672761fb13..fb97c2143d 100644 --- a/rally/plugins/openstack/context/nova/flavors.py +++ b/rally/plugins/openstack/context/nova/flavors.py @@ -84,18 +84,19 @@ class FlavorsGenerator(context.Context): flavor_config = FlavorConfig(**flavor_config) try: flavor = clients.nova().flavors.create(**flavor_config) - except nova_exceptions.Conflict as e: - LOG.warning("Using already existing flavor %s" % - flavor_config["name"]) + except nova_exceptions.Conflict: + msg = "Using existing flavor %s" % flavor_config["name"] if logging.is_debug(): - LOG.exception(e) + LOG.exception(msg) + else: + LOG.warning(msg) continue if extra_specs: flavor.set_keys(extra_specs) self.context["flavors"][flavor_config["name"]] = flavor.to_dict() - LOG.debug("Created flavor with id '%s'", flavor.id) + LOG.debug("Created flavor with id '%s'" % flavor.id) def cleanup(self): """Delete created flavors.""" diff --git a/rally/plugins/openstack/context/nova/servers.py b/rally/plugins/openstack/context/nova/servers.py index 4984710f54..ed44e894a9 100755 --- a/rally/plugins/openstack/context/nova/servers.py +++ b/rally/plugins/openstack/context/nova/servers.py @@ -99,8 +99,7 @@ class ServerGenerator(context.Context): for iter_, (user, tenant_id) in enumerate(rutils.iterate_per_tenants( self.context["users"])): - LOG.debug("Booting servers for user tenant %s ", - (user["tenant_id"])) + LOG.debug("Booting servers for user tenant %s" % user["tenant_id"]) tmp_context = {"user": user, "tenant": self.context["tenants"][tenant_id], "task": self.context["task"], @@ -110,10 +109,10 @@ class ServerGenerator(context.Context): LOG.debug("Calling _boot_servers with image_id=%(image_id)s " "flavor_id=%(flavor_id)s " - "servers_per_tenant=%(servers_per_tenant)s", - {"image_id": image_id, - "flavor_id": flavor_id, - "servers_per_tenant": servers_per_tenant}) + "servers_per_tenant=%(servers_per_tenant)s" + % {"image_id": image_id, + "flavor_id": flavor_id, + "servers_per_tenant": servers_per_tenant}) servers = nova_scenario._boot_servers(image_id, flavor_id, requests=servers_per_tenant, @@ -122,8 +121,7 @@ class ServerGenerator(context.Context): current_servers = [server.id for server in servers] - LOG.debug("Adding booted servers %s to context", - current_servers) + LOG.debug("Adding booted servers %s to context" % current_servers) self.context["tenants"][tenant_id][ "servers"] = current_servers diff --git a/rally/plugins/openstack/context/quotas/quotas.py b/rally/plugins/openstack/context/quotas/quotas.py index bb45fcb8de..429e2f49ff 100644 --- a/rally/plugins/openstack/context/quotas/quotas.py +++ b/rally/plugins/openstack/context/quotas/quotas.py @@ -95,11 +95,10 @@ class Quotas(context.Context): try: self.manager[service].delete(tenant_id) except Exception as e: - LOG.warning("Failed to remove quotas for tenant " - "%(tenant_id)s in service %(service)s " - 
"\n reason: %(exc)s" - % {"tenant_id": tenant_id, - "service": service, "exc": e}) + LOG.warning( + "Failed to remove quotas for tenant %(tenant)s " + "in service %(service)s reason: %(e)s" % + {"tenant": tenant_id, "service": service, "e": e}) def cleanup(self): if self.original_quotas: diff --git a/rally/plugins/openstack/context/sahara/sahara_cluster.py b/rally/plugins/openstack/context/sahara/sahara_cluster.py index 7916e35dc1..c8bf2afe29 100644 --- a/rally/plugins/openstack/context/sahara/sahara_cluster.py +++ b/rally/plugins/openstack/context/sahara/sahara_cluster.py @@ -15,7 +15,6 @@ from oslo_config import cfg -from rally.common.i18n import _ from rally.common import utils as rutils from rally.common import validation from rally import consts @@ -167,13 +166,12 @@ class SaharaCluster(context.Context): for cluster, client in dct.items(): cluster_status = cluster.status.lower() if cluster_status == "error": - msg = _("Sahara cluster %(name)s has failed to" - " %(action)s. Reason: '%(reason)s'") % { - "name": cluster.name, "action": "start", - "reason": cluster.status_description} - raise exceptions.ContextSetupFailure( - ctx_name=self.get_name(), - msg=msg) + msg = ("Sahara cluster %(name)s has failed to" + " %(action)s. Reason: '%(reason)s'" + % {"name": cluster.name, "action": "start", + "reason": cluster.status_description}) + raise exceptions.ContextSetupFailure(ctx_name=self.get_name(), + msg=msg) elif cluster_status != "active": return False return True diff --git a/rally/plugins/openstack/context/sahara/sahara_image.py b/rally/plugins/openstack/context/sahara/sahara_image.py index a3d894fecc..8883b7e40e 100644 --- a/rally/plugins/openstack/context/sahara/sahara_image.py +++ b/rally/plugins/openstack/context/sahara/sahara_image.py @@ -12,7 +12,6 @@ # License for the specific language governing permissions and limitations # under the License. -from rally.common.i18n import _ from rally.common import utils as rutils from rally.common import validation from rally import consts @@ -101,8 +100,7 @@ class SaharaImage(context.Context): if visibility != "public": raise exceptions.ContextSetupFailure( ctx_name=self.get_name(), - msg=_("Image provided in the Sahara context" - " should be public.") + msg="Use only public image for sahara_image context" ) image_id = image_uuid diff --git a/rally/plugins/openstack/context/sahara/sahara_job_binaries.py b/rally/plugins/openstack/context/sahara/sahara_job_binaries.py index 774b09df2a..f5e82d5b0b 100644 --- a/rally/plugins/openstack/context/sahara/sahara_job_binaries.py +++ b/rally/plugins/openstack/context/sahara/sahara_job_binaries.py @@ -104,7 +104,7 @@ class SaharaJobBinaries(context.Context): def setup_inputs(self, sahara, tenant_id, input_type, input_url): if input_type == "swift": raise exceptions.RallyException( - _("Swift Data Sources are not implemented yet")) + "Swift Data Sources are not implemented yet") # Todo(nkonovalov): Add swift credentials parameters and data upload input_ds = sahara.data_sources.create( name=self.generate_random_name(), diff --git a/rally/plugins/openstack/context/swift/objects.py b/rally/plugins/openstack/context/swift/objects.py index b438d555c1..6a6c82d5da 100644 --- a/rally/plugins/openstack/context/swift/objects.py +++ b/rally/plugins/openstack/context/swift/objects.py @@ -13,7 +13,6 @@ # License for the specific language governing permissions and limitations # under the License. 
-from rally.common.i18n import _ from rally.common import logging from rally.common import validation from rally import consts @@ -65,22 +64,22 @@ class SwiftObjectGenerator(swift_utils.SwiftObjectMixin, context.Context): containers_per_tenant = self.config["containers_per_tenant"] containers_num = len(self.context["tenants"]) * containers_per_tenant - LOG.debug("Creating %d containers using %d threads.", (containers_num, - threads)) + LOG.debug("Creating %d containers using %d threads." + % (containers_num, threads)) containers_count = len(self._create_containers(self.context, containers_per_tenant, threads)) if containers_count != containers_num: raise exceptions.ContextSetupFailure( ctx_name=self.get_name(), - msg=_("Failed to create the requested number of containers, " - "expected %(expected)s but got %(actual)s.") - % {"expected": containers_num, "actual": containers_count}) + msg="Failed to create the requested number of containers, " + "expected %(expected)s but got %(actual)s." + % {"expected": containers_num, "actual": containers_count}) objects_per_container = self.config["objects_per_container"] objects_num = containers_num * objects_per_container - LOG.debug("Creating %d objects using %d threads.", (objects_num, - threads)) + LOG.debug("Creating %d objects using %d threads." + % (objects_num, threads)) objects_count = len(self._create_objects(self.context, objects_per_container, self.config["object_size"], @@ -88,9 +87,9 @@ class SwiftObjectGenerator(swift_utils.SwiftObjectMixin, context.Context): if objects_count != objects_num: raise exceptions.ContextSetupFailure( ctx_name=self.get_name(), - msg=_("Failed to create the requested number of objects, " - "expected %(expected)s but got %(actual)s.") - % {"expected": objects_num, "actual": objects_count}) + msg="Failed to create the requested number of objects, " + "expected %(expected)s but got %(actual)s." 
+ % {"expected": objects_num, "actual": objects_count}) def cleanup(self): """Delete containers and objects, using the broker pattern.""" diff --git a/rally/plugins/openstack/context/vm/custom_image.py b/rally/plugins/openstack/context/vm/custom_image.py index 9da4f5305f..1518652825 100644 --- a/rally/plugins/openstack/context/vm/custom_image.py +++ b/rally/plugins/openstack/context/vm/custom_image.py @@ -18,7 +18,6 @@ import abc import six from rally.common import broker -from rally.common.i18n import _ from rally.common import logging from rally.common import utils from rally import consts @@ -164,13 +163,13 @@ class BaseCustomImageGenerator(context.Context): **kwargs) try: - LOG.debug("Installing tools on %r %s", server, fip["ip"]) + LOG.debug("Installing tools on %r %s" % (server, fip["ip"])) self.customize_image(server, fip, user) - LOG.debug("Stopping server %r", server) + LOG.debug("Stopping server %r" % server) vm_scenario._stop_server(server) - LOG.debug("Creating snapshot for %r", server) + LOG.debug("Creating snapshot for %r" % server) custom_image = vm_scenario._create_image(server) finally: vm_scenario._delete_server_with_fip(server, fip) @@ -205,13 +204,12 @@ class BaseCustomImageGenerator(context.Context): """Delete the image created for the user and tenant.""" with logging.ExceptionLogger( - LOG, _("Unable to delete image %s") % custom_image.id): + LOG, "Unable to delete image %s" % custom_image.id): glance_service = image.Image(user["credential"].clients()) glance_service.delete_image(custom_image.id) - @logging.log_task_wrapper(LOG.info, - _("Custom image context: customizing")) + @logging.log_task_wrapper(LOG.info, "Custom image context: customizing") def customize_image(self, server, ip, user): return self._customize_image(server, ip, user) diff --git a/rally/plugins/openstack/hook/fault_injection.py b/rally/plugins/openstack/hook/fault_injection.py index c4c3c0ad58..1cc0338a1f 100644 --- a/rally/plugins/openstack/hook/fault_injection.py +++ b/rally/plugins/openstack/hook/fault_injection.py @@ -75,5 +75,5 @@ class FaultInjectionHook(hook.Hook): if self.config.get("verify"): injector.verify() - LOG.debug("Injecting fault: %s", self.config["action"]) + LOG.debug("Injecting fault: %s" % self.config["action"]) os_faults.human_api(injector, self.config["action"]) diff --git a/rally/plugins/openstack/scenarios/cinder/utils.py b/rally/plugins/openstack/scenarios/cinder/utils.py index 2101dfb306..a61bed6c21 100644 --- a/rally/plugins/openstack/scenarios/cinder/utils.py +++ b/rally/plugins/openstack/scenarios/cinder/utils.py @@ -17,7 +17,6 @@ import random from oslo_config import cfg -from rally.common.i18n import _, _LW from rally.common import logging from rally import exceptions from rally.plugins.openstack import scenario @@ -53,11 +52,11 @@ class CinderScenario(scenario.OpenStackScenario): def __init__(self, context=None, admin_clients=None, clients=None): super(CinderScenario, self).__init__(context, admin_clients, clients) - LOG.warning(_LW( + LOG.warning( "Class %s is deprecated since Rally 0.10.0 and will be removed " "soon. Use " "rally.plugins.openstack.services.storage.block.BlockStorage " - "instead.") % self.__class__) + "instead." 
% self.__class__) @atomic.action_timer("cinder.list_volumes") def _list_volumes(self, detailed=True): @@ -504,5 +503,4 @@ class CinderScenario(scenario.OpenStackScenario): resp = self.admin_clients("cinder").volume_encryption_types.delete( volume_type) if (resp[0].status_code != 202): - raise exceptions.RallyException( - _("EncryptionType Deletion Failed")) + raise exceptions.RallyException("EncryptionType Deletion Failed") diff --git a/rally/plugins/openstack/scenarios/glance/utils.py b/rally/plugins/openstack/scenarios/glance/utils.py index 6be988e872..4a0cbba3f3 100644 --- a/rally/plugins/openstack/scenarios/glance/utils.py +++ b/rally/plugins/openstack/scenarios/glance/utils.py @@ -15,7 +15,6 @@ from oslo_config import cfg -from rally.common.i18n import _ from rally.common import logging from rally.plugins.openstack import scenario from rally.plugins.openstack.wrappers import glance as glance_wrapper @@ -32,11 +31,11 @@ class GlanceScenario(scenario.OpenStackScenario): def __init__(self, context=None, admin_clients=None, clients=None): super(GlanceScenario, self).__init__(context, admin_clients, clients) - LOG.warning(_( + LOG.warning( "Class %s is deprecated since Rally 0.10.0 and will be removed " "soon. Use " "rally.plugins.openstack.services.image.image.Image " - "instead.") % self.__class__) + "instead." % self.__class__) @atomic.action_timer("glance.list_images") def _list_images(self): diff --git a/rally/plugins/openstack/scenarios/heat/utils.py b/rally/plugins/openstack/scenarios/heat/utils.py index dd57f33505..e0e1b5ce35 100644 --- a/rally/plugins/openstack/scenarios/heat/utils.py +++ b/rally/plugins/openstack/scenarios/heat/utils.py @@ -292,8 +292,8 @@ class HeatScenario(scenario.OpenStackScenario): """ num_instances = self._count_instances(stack) expected_instances = num_instances + delta - LOG.debug("Scaling stack %s from %s to %s instances with %s", - (stack.id, num_instances, expected_instances, output_key)) + LOG.debug("Scaling stack %s from %s to %s instances with %s" + % (stack.id, num_instances, expected_instances, output_key)) with atomic.ActionTimer(self, "heat.scale_with_%s" % output_key): self._stack_webhook(stack, output_key) utils.wait_for( diff --git a/rally/plugins/openstack/scenarios/keystone/utils.py b/rally/plugins/openstack/scenarios/keystone/utils.py index 995a4fb4f8..b5fa4d632b 100644 --- a/rally/plugins/openstack/scenarios/keystone/utils.py +++ b/rally/plugins/openstack/scenarios/keystone/utils.py @@ -15,7 +15,6 @@ import uuid -from rally.common.i18n import _LW from rally.common import logging from rally.plugins.openstack import scenario from rally.plugins.openstack.wrappers import keystone as keystone_wrapper @@ -30,11 +29,11 @@ class KeystoneScenario(scenario.OpenStackScenario): def __init__(self, context=None, admin_clients=None, clients=None): super(KeystoneScenario, self).__init__(context, admin_clients, clients) - LOG.warning(_LW( + LOG.warning( "Class %s is deprecated since Rally 0.8.0 and will be removed " "soon. Use " "rally.plugins.openstack.services.identity.identity.Identity " - "instead.") % self.__class__) + "instead." 
% self.__class__) @atomic.action_timer("keystone.create_user") def _user_create(self, email=None, **kwargs): diff --git a/rally/plugins/openstack/scenarios/neutron/utils.py b/rally/plugins/openstack/scenarios/neutron/utils.py index 0f1f0b5c3a..5f92cfd363 100644 --- a/rally/plugins/openstack/scenarios/neutron/utils.py +++ b/rally/plugins/openstack/scenarios/neutron/utils.py @@ -17,7 +17,6 @@ import random from oslo_config import cfg -from rally.common.i18n import _ from rally.common import logging from rally import exceptions from rally.plugins.openstack import scenario @@ -56,8 +55,8 @@ class NeutronScenario(scenario.OpenStackScenario): for net in networks: if (net["name"] == network) or (net["id"] == network): return net["id"] - msg = (_("Network %s not found.") % network) - raise exceptions.NotFoundException(message=msg) + raise exceptions.NotFoundException( + message="Network %s not found." % network) @property def _ext_gw_mode_enabled(self): @@ -302,9 +301,9 @@ class NeutronScenario(scenario.OpenStackScenario): """ self.clients("neutron").delete_port(port["port"]["id"]) - @logging.log_deprecated_args(_("network_create_args is deprecated; " - "use the network context instead"), - "0.1.0", "network_create_args") + @logging.log_deprecated_args( + "network_create_args is deprecated; use the network context instead", + "0.1.0", "network_create_args") def _get_or_create_network(self, network_create_args=None): """Get a network from context, or create a new one. @@ -323,8 +322,8 @@ class NeutronScenario(scenario.OpenStackScenario): return {"network": random.choice(self.context["tenant"]["networks"])} else: - LOG.warning(_("Running this scenario without either the 'network' " - "or 'existing_network' context is deprecated")) + LOG.warning("Running this scenario without either the 'network' " + "or 'existing_network' context is deprecated") return self._create_network(network_create_args or {}) def _create_subnets(self, network, diff --git a/rally/plugins/openstack/scenarios/nova/flavors.py b/rally/plugins/openstack/scenarios/nova/flavors.py index 2a912c4ec6..bb382bcafa 100644 --- a/rally/plugins/openstack/scenarios/nova/flavors.py +++ b/rally/plugins/openstack/scenarios/nova/flavors.py @@ -13,7 +13,6 @@ # License for the specific language governing permissions and limitations # under the License. -from rally.common.i18n import _LW from rally.common import logging from rally import consts from rally.plugins.openstack import scenario @@ -81,8 +80,8 @@ class CreateAndListFlavorAccess(utils.NovaScenario): # NOTE(pirsriva): access rules can be listed # only for non-public flavors if is_public: - LOG.warning(_LW("is_public cannot be set to True for listing " - "flavor access rules. Setting is_public to False")) + LOG.warning("is_public cannot be set to True for listing " + "flavor access rules. 
Setting is_public to False") is_public = False flavor = self._create_flavor(ram, vcpus, disk, flavorid=flavorid, ephemeral=ephemeral, swap=swap, diff --git a/rally/plugins/openstack/scenarios/nova/utils.py b/rally/plugins/openstack/scenarios/nova/utils.py index fe988fe429..14f3c4ac84 100644 --- a/rally/plugins/openstack/scenarios/nova/utils.py +++ b/rally/plugins/openstack/scenarios/nova/utils.py @@ -17,7 +17,6 @@ import random from oslo_config import cfg -from rally.common.i18n import _ from rally.common import logging from rally import exceptions from rally.plugins.openstack import scenario @@ -766,9 +765,9 @@ class NovaScenario(scenario.OpenStackScenario): server_admin = self.admin_clients("nova").servers.get(server.id) if (host_pre_migrate == getattr(server_admin, "OS-EXT-SRV-ATTR:host") and not skip_host_check): - raise exceptions.RallyException(_( + raise exceptions.RallyException( "Live Migration failed: Migration complete " - "but instance did not change host: %s") % host_pre_migrate) + "but instance did not change host: %s" % host_pre_migrate) @atomic.action_timer("nova.find_host_to_migrate") def _find_host_to_migrate(self, server): @@ -792,7 +791,7 @@ class NovaScenario(scenario.OpenStackScenario): return new_host except IndexError: raise exceptions.RallyException( - _("Live Migration failed: No valid host found to migrate")) + "Live Migration failed: No valid host found to migrate") @atomic.action_timer("nova.migrate") def _migrate(self, server, skip_host_check=False): @@ -818,8 +817,8 @@ class NovaScenario(scenario.OpenStackScenario): host_after_migrate = getattr(server_admin, "OS-EXT-SRV-ATTR:host") if host_pre_migrate == host_after_migrate: raise exceptions.RallyException( - _("Migration failed: Migration complete but instance" - " did not change host: %s") % host_pre_migrate) + "Migration failed: Migration complete but instance" + " did not change host: %s" % host_pre_migrate) @atomic.action_timer("nova.add_server_secgroups") def _add_server_secgroups(self, server, security_group, diff --git a/rally/plugins/openstack/scenarios/sahara/clusters.py b/rally/plugins/openstack/scenarios/sahara/clusters.py index f8d5799570..08e8ba2e6c 100644 --- a/rally/plugins/openstack/scenarios/sahara/clusters.py +++ b/rally/plugins/openstack/scenarios/sahara/clusters.py @@ -95,7 +95,7 @@ class CreateAndDeleteCluster(utils.SaharaScenario): image_id = self.context["tenant"]["sahara"]["image"] - LOG.debug("Using Image: %s", image_id) + LOG.debug("Using Image: %s" % image_id) cluster = self._launch_cluster( flavor_id=flavor, @@ -194,7 +194,7 @@ class CreateScaleDeleteCluster(utils.SaharaScenario): image_id = self.context["tenant"]["sahara"]["image"] - LOG.debug("Using Image: %s", image_id) + LOG.debug("Using Image: %s" % image_id) cluster = self._launch_cluster( flavor_id=flavor, diff --git a/rally/plugins/openstack/scenarios/sahara/jobs.py b/rally/plugins/openstack/scenarios/sahara/jobs.py index 77905f8cd1..dc9d7dc6c2 100644 --- a/rally/plugins/openstack/scenarios/sahara/jobs.py +++ b/rally/plugins/openstack/scenarios/sahara/jobs.py @@ -92,7 +92,7 @@ class CreateLaunchJobSequence(utils.SaharaScenario): launch_job = CreateLaunchJob(self.context) for idx, job in enumerate(jobs): - LOG.debug("Launching Job. Sequence #%d", idx) + LOG.debug("Launching Job. Sequence #%d" % idx) launch_job.run(job["job_type"], job["configs"], idx) @@ -127,8 +127,8 @@ class CreateLaunchJobSequenceWithScaling(utils.SaharaScenario,): # correct 'count' values. 
cluster = self.clients("sahara").clusters.get(cluster_id) - LOG.debug("Scaling cluster %s with delta %d", - (cluster.name, delta)) + LOG.debug("Scaling cluster %s with delta %d" + % (cluster.name, delta)) if delta == 0: # Zero scaling makes no sense. continue diff --git a/rally/plugins/openstack/scenarios/sahara/utils.py b/rally/plugins/openstack/scenarios/sahara/utils.py index 48115433ac..44678032e1 100644 --- a/rally/plugins/openstack/scenarios/sahara/utils.py +++ b/rally/plugins/openstack/scenarios/sahara/utils.py @@ -19,7 +19,6 @@ from oslo_config import cfg from oslo_utils import uuidutils from saharaclient.api import base as sahara_base -from rally.common.i18n import _ from rally.common import logging from rally.common import utils as rutils from rally import consts @@ -127,8 +126,8 @@ class SaharaScenario(scenario.OpenStackScenario): # If the name is not found in the list. Exit with error. raise exceptions.ContextSetupFailure( ctx_name=self.get_name(), - msg=_("Could not resolve Floating IP Pool" - " name %s to id") % name_or_id) + msg="Could not resolve Floating IP Pool name %s to id" + % name_or_id) else: # Pool is not provided. Using the one set as GW for current router. @@ -167,7 +166,7 @@ class SaharaScenario(scenario.OpenStackScenario): floating_ip_pool) if floating_ip_pool_value: - LOG.debug("Using floating ip pool %s.", floating_ip_pool_value) + LOG.debug("Using floating ip pool %s." % floating_ip_pool_value) # If the pool is set by any means assign it to all node groups. # If the proxy node feature is enabled, Master Node Group and # Proxy Workers should have a floating ip pool set up @@ -233,7 +232,7 @@ class SaharaScenario(scenario.OpenStackScenario): replication_value = min(workers_count, 3) # 3 is a default Hadoop replication conf = sahara_consts.REPLICATION_CONFIGS[plugin_name][hadoop_version] - LOG.debug("Using replication factor: %s", replication_value) + LOG.debug("Using replication factor: %s" % replication_value) replication_config = { conf["target"]: { conf["config_name"]: replication_value @@ -394,7 +393,7 @@ class SaharaScenario(scenario.OpenStackScenario): ) if wait_active: - LOG.debug("Starting cluster `%s`", name) + LOG.debug("Starting cluster `%s`" % name) self._wait_active(cluster_object) return self.clients("sahara").clusters.get(cluster_object.id) @@ -454,7 +453,7 @@ class SaharaScenario(scenario.OpenStackScenario): :param cluster: cluster to delete """ - LOG.debug("Deleting cluster `%s`", cluster.name) + LOG.debug("Deleting cluster `%s`" % cluster.name) self.clients("sahara").clusters.delete(cluster.id) utils.wait_for( @@ -464,8 +463,8 @@ class SaharaScenario(scenario.OpenStackScenario): is_ready=self._is_cluster_deleted) def _is_cluster_deleted(self, cluster): - LOG.debug("Checking cluster `%s` to be deleted. Status: `%s`", - (cluster.name, cluster.status)) + LOG.debug("Checking cluster `%s` to be deleted. Status: `%s`" + % (cluster.name, cluster.status)) try: self.clients("sahara").clusters.get(cluster.id) return False @@ -482,7 +481,7 @@ class SaharaScenario(scenario.OpenStackScenario): if ds_type == "swift": raise exceptions.RallyException( - _("Swift Data Sources are not implemented yet")) + "Swift Data Sources are not implemented yet") url = url_prefix.rstrip("/") + "/%s" % self.generate_random_name() @@ -534,8 +533,8 @@ class SaharaScenario(scenario.OpenStackScenario): status = self.clients("sahara").job_executions.get(je_id).info[ "status"].lower() - LOG.debug("Checking for Job Execution %s to complete. 
Status: %s", - (je_id, status)) + LOG.debug("Checking for Job Execution %s to complete. Status: %s" + % (je_id, status)) if status in ("success", "succeeded"): return True elif status in ("failed", "killed"): @@ -574,8 +573,8 @@ class SaharaScenario(scenario.OpenStackScenario): # Taking net id from context. net = self.context["tenant"]["networks"][0] neutron_net_id = net["id"] - LOG.debug("Using neutron network %s.", neutron_net_id) - LOG.debug("Using neutron router %s.", net["router_id"]) + LOG.debug("Using neutron network %s." % neutron_net_id) + LOG.debug("Using neutron router %s." % net["router_id"]) return neutron_net_id diff --git a/rally/plugins/openstack/scenarios/vm/utils.py b/rally/plugins/openstack/scenarios/vm/utils.py index 0b8baf6687..459c357573 100644 --- a/rally/plugins/openstack/scenarios/vm/utils.py +++ b/rally/plugins/openstack/scenarios/vm/utils.py @@ -21,7 +21,6 @@ import netaddr from oslo_config import cfg import six -from rally.common.i18n import _ from rally.common import logging from rally.common import sshutils from rally.plugins.openstack.scenarios.nova import utils as nova_utils @@ -63,8 +62,8 @@ class Host(object): stdout=subprocess.PIPE, stderr=subprocess.PIPE) proc.wait() - LOG.debug("Host %s is ICMP %s", - (server.ip.format(), proc.returncode and "down" or "up")) + LOG.debug("Host %s is ICMP %s" + % (server.ip.format(), proc.returncode and "down" or "up")) if proc.returncode == 0: server.status = cls.ICMP_UP_STATUS else: @@ -177,7 +176,7 @@ class VMScenario(nova_utils.NovaScenario): @atomic.action_timer("vm.delete_floating_ip") def _delete_floating_ip(self, server, fip): with logging.ExceptionLogger( - LOG, _("Unable to delete IP: %s") % fip["ip"]): + LOG, "Unable to delete IP: %s" % fip["ip"]): if self.check_ip_address(fip["ip"])(server): self._dissociate_floating_ip(server, fip["ip"]) network_wrapper.wrap(self.clients, self).delete_floating_ip( diff --git a/rally/plugins/openstack/scenarios/vm/vmtasks.py b/rally/plugins/openstack/scenarios/vm/vmtasks.py index ddd18e1d06..4c4180421a 100644 --- a/rally/plugins/openstack/scenarios/vm/vmtasks.py +++ b/rally/plugins/openstack/scenarios/vm/vmtasks.py @@ -269,7 +269,7 @@ class BootRuncommandDelete(vm_utils.VMScenario, cinder_utils.CinderBasic): exceptions.SSHTimeout): console_logs = self._get_server_console_output(server, max_log_length) - LOG.debug("VM console logs:\n%s", console_logs) + LOG.debug("VM console logs:\n%s" % console_logs) raise finally: diff --git a/rally/plugins/openstack/services/storage/cinder_common.py b/rally/plugins/openstack/services/storage/cinder_common.py index 79c29157f9..a2fbb1b7c3 100644 --- a/rally/plugins/openstack/services/storage/cinder_common.py +++ b/rally/plugins/openstack/services/storage/cinder_common.py @@ -14,7 +14,6 @@ import random -from rally.common.i18n import _ from rally import exceptions from rally.plugins.openstack.services.image import image from rally.plugins.openstack.services.storage import block @@ -454,7 +453,7 @@ class CinderMixin(object): volume_type) if (resp[0].status_code != 202): raise exceptions.RallyException( - _("EncryptionType Deletion Failed")) + "EncryptionType Deletion Failed") def update_encryption_type(self, volume_type, specs): """Update the encryption type information for the specified volume type. 
diff --git a/rally/plugins/openstack/verification/tempest/config.py b/rally/plugins/openstack/verification/tempest/config.py index 19ed0e58ef..3087fbc659 100644 --- a/rally/plugins/openstack/verification/tempest/config.py +++ b/rally/plugins/openstack/verification/tempest/config.py @@ -109,7 +109,7 @@ class TempestConfigfileManager(object): uri = os.path.join(cropped_auth_url, "v2.0") else: # Does Keystone released new version of API ?! - LOG.debug("Discovered keystone versions: %s", versions) + LOG.debug("Discovered keystone versions: %s" % versions) raise exceptions.RallyException("Failed to discover keystone " "auth urls.") diff --git a/rally/plugins/openstack/verification/tempest/context.py b/rally/plugins/openstack/verification/tempest/context.py index fba6513bf7..630d72b5db 100644 --- a/rally/plugins/openstack/verification/tempest/context.py +++ b/rally/plugins/openstack/verification/tempest/context.py @@ -19,7 +19,6 @@ import re import requests from six.moves import configparser -from rally.common.i18n import _ from rally.common import logging from rally import exceptions from rally.plugins.openstack.services.image import image @@ -130,20 +129,20 @@ class TempestContext(context.VerifierContext): for role in roles: if role not in existing_roles: - LOG.debug("Creating role '%s'.", role) + LOG.debug("Creating role '%s'." % role) self._created_roles.append(keystoneclient.roles.create(role)) def _configure_option(self, section, option, value=None, helper_method=None, *args, **kwargs): option_value = self.conf.get(section, option) if not option_value: - LOG.debug("Option '%s' from '%s' section " - "is not configured.", (option, section)) + LOG.debug("Option '%s' from '%s' section is not configured." + % (option, section)) if helper_method: res = helper_method(*args, **kwargs) if res: value = res["name"] if "network" in option else res.id - LOG.debug("Setting value '%s' to option '%s'.", (value, option)) + LOG.debug("Setting value '%s' to option '%s'." % (value, option)) self.conf.set(section, option, value) LOG.debug("Option '{opt}' is configured. " "{opt} = {value}".format(opt=option, value=value)) @@ -155,36 +154,36 @@ class TempestContext(context.VerifierContext): def _discover_image(self): LOG.debug("Trying to discover a public image with name matching " "regular expression '%s'. Note that case insensitive " - "matching is performed.", conf.CONF.tempest.img_name_regex) + "matching is performed." % conf.CONF.tempest.img_name_regex) image_service = image.Image(self.clients) images = image_service.list_images(status="active", visibility="public") for image_obj in images: if image_obj.name and re.match(conf.CONF.tempest.img_name_regex, image_obj.name, re.IGNORECASE): - LOG.debug("The following public " - "image discovered: '%s'.", image_obj.name) + LOG.debug("The following public image discovered: '%s'." + % image_obj.name) return image_obj LOG.debug("There is no public image with name matching regular " - "expression '%s'.", conf.CONF.tempest.img_name_regex) + "expression '%s'." % conf.CONF.tempest.img_name_regex) def _download_image_from_source(self, target_path, image=None): if image: - LOG.debug("Downloading image '%s' " - "from Glance to %s.", (image.name, target_path)) + LOG.debug("Downloading image '%s' from Glance to %s." 
+ % (image.name, target_path)) with open(target_path, "wb") as image_file: for chunk in self.clients.glance().images.data(image.id): image_file.write(chunk) else: - LOG.debug("Downloading image from %s " - "to %s.", (conf.CONF.tempest.img_url, target_path)) + LOG.debug("Downloading image from %s to %s." + % (conf.CONF.tempest.img_url, target_path)) try: response = requests.get(conf.CONF.tempest.img_url, stream=True) except requests.ConnectionError as err: - msg = _("Failed to download image. " - "Possibly there is no connection to Internet. " - "Error: %s.") % (str(err) or "unknown") + msg = ("Failed to download image. Possibly there is no " + "connection to Internet. Error: %s." + % (str(err) or "unknown")) raise exceptions.RallyException(msg) if response.status_code == 200: @@ -195,10 +194,10 @@ class TempestContext(context.VerifierContext): image_file.flush() else: if response.status_code == 404: - msg = _("Failed to download image. Image was not found.") + msg = "Failed to download image. Image was not found." else: - msg = _("Failed to download image. " - "HTTP error code %d.") % response.status_code + msg = ("Failed to download image. HTTP error code %d." + % response.status_code) raise exceptions.RallyException(msg) LOG.debug("The image has been successfully downloaded!") @@ -206,7 +205,7 @@ class TempestContext(context.VerifierContext): def _download_image(self): image_path = os.path.join(self.data_dir, self.image_name) if os.path.isfile(image_path): - LOG.debug("Image is already downloaded to %s.", image_path) + LOG.debug("Image is already downloaded to %s." % image_path) return if conf.CONF.tempest.img_name_regex: @@ -220,8 +219,8 @@ class TempestContext(context.VerifierContext): if conf.CONF.tempest.img_name_regex: image_obj = self._discover_image() if image_obj: - LOG.debug("Using image '%s' (ID = %s) " - "for the tests.", (image_obj.name, image_obj.id)) + LOG.debug("Using image '%s' (ID = %s) for the tests." + % (image_obj.name, image_obj.id)) return image_obj params = { @@ -234,8 +233,8 @@ class TempestContext(context.VerifierContext): LOG.debug("Creating image '%s'." % params["image_name"]) image_service = image.Image(self.clients) image_obj = image_service.create_image(**params) - LOG.debug("Image '%s' (ID = %s) has been " - "successfully created!", (image_obj.name, image_obj.id)) + LOG.debug("Image '%s' (ID = %s) has been successfully created!" + % (image_obj.name, image_obj.id)) self._created_images.append(image_obj) return image_obj @@ -244,7 +243,7 @@ class TempestContext(context.VerifierContext): novaclient = self.clients.nova() LOG.debug("Trying to discover a flavor with the following " - "properties: RAM = %dMB, VCPUs = 1, disk = 0GB.", flv_ram) + "properties: RAM = %dMB, VCPUs = 1, disk = 0GB." % flv_ram) for flavor in novaclient.flavors.list(): if (flavor.ram == flv_ram and flavor.vcpus == 1 and flavor.disk == 0): @@ -262,10 +261,10 @@ class TempestContext(context.VerifierContext): "disk": 0 } LOG.debug("Creating flavor '%s' with the following properties: RAM " - "= %dMB, VCPUs = 1, disk = 0GB.", (params["name"], flv_ram)) + "= %dMB, VCPUs = 1, disk = 0GB." % (params["name"], flv_ram)) flavor = novaclient.flavors.create(**params) - LOG.debug("Flavor '%s' (ID = %s) has been " - "successfully created!", (flavor.name, flavor.id)) + LOG.debug("Flavor '%s' (ID = %s) has been successfully created!" 
+                  % (flavor.name, flavor.id))
         self._created_flavors.append(flavor)
         return flavor
 
@@ -285,14 +284,14 @@ class TempestContext(context.VerifierContext):
     def _cleanup_tempest_roles(self):
         keystoneclient = self.clients.keystone()
         for role in self._created_roles:
-            LOG.debug("Deleting role '%s'.", role.name)
+            LOG.debug("Deleting role '%s'." % role.name)
             keystoneclient.roles.delete(role.id)
-            LOG.debug("Role '%s' has been deleted.", role.name)
+            LOG.debug("Role '%s' has been deleted." % role.name)
 
     def _cleanup_images(self):
         image_service = image.Image(self.clients)
         for image_obj in self._created_images:
-            LOG.debug("Deleting image '%s'.", image_obj.name)
+            LOG.debug("Deleting image '%s'." % image_obj.name)
             self.clients.glance().images.delete(image_obj.id)
             task_utils.wait_for_status(
                 image_obj, ["deleted", "pending_delete"],
@@ -301,15 +300,15 @@ class TempestContext(context.VerifierContext):
                 timeout=conf.CONF.benchmark.glance_image_delete_timeout,
                 check_interval=conf.CONF.benchmark.
                 glance_image_delete_poll_interval)
-            LOG.debug("Image '%s' has been deleted.", image_obj.name)
+            LOG.debug("Image '%s' has been deleted." % image_obj.name)
             self._remove_opt_value_from_config("compute", image_obj.id)
 
     def _cleanup_flavors(self):
         novaclient = self.clients.nova()
         for flavor in self._created_flavors:
-            LOG.debug("Deleting flavor '%s'.", flavor.name)
+            LOG.debug("Deleting flavor '%s'." % flavor.name)
             novaclient.flavors.delete(flavor.id)
-            LOG.debug("Flavor '%s' has been deleted.", flavor.name)
+            LOG.debug("Flavor '%s' has been deleted." % flavor.name)
             self._remove_opt_value_from_config("compute", flavor.id)
             self._remove_opt_value_from_config("orchestration", flavor.id)
 
@@ -325,6 +324,6 @@ class TempestContext(context.VerifierContext):
         for option, value in self.conf.items(section):
             if opt_value == value:
                 LOG.debug("Removing value '%s' of option '%s' "
-                          "from Tempest config file.", (opt_value, option))
+                          "from Tempest config file." % (opt_value, option))
                 self.conf.set(section, option, "")
-                LOG.debug("Value '%s' has been removed.", opt_value)
+                LOG.debug("Value '%s' has been removed." % opt_value)
diff --git a/rally/plugins/openstack/verification/tempest/manager.py b/rally/plugins/openstack/verification/tempest/manager.py
index b1a3844a20..3f4503048e 100644
--- a/rally/plugins/openstack/verification/tempest/manager.py
+++ b/rally/plugins/openstack/verification/tempest/manager.py
@@ -17,7 +17,6 @@ import re
 import shutil
 import subprocess
 
-from rally.common.i18n import _LE
 from rally.common import yamlutils as yaml
 from rally import exceptions
 from rally.plugins.common.verification import testr
@@ -128,8 +127,8 @@ class TempestManager(testr.TestrLauncher):
         """Install a Tempest plugin."""
         if extra_settings:
             raise NotImplementedError(
-                _LE("'%s' verifiers don't support extra installation settings "
-                    "for extensions.") % self.get_name())
+                "'%s' verifiers don't support extra installation settings "
+                "for extensions." % self.get_name())
         version = version or "master"
         egg = re.sub("\.git$", "", os.path.basename(source.strip("/")))
         full_source = "git+{0}@{1}#egg={2}".format(source, version, egg)
diff --git a/rally/plugins/openstack/wrappers/network.py b/rally/plugins/openstack/wrappers/network.py
index 0f1cdcb130..f979646469 100644
--- a/rally/plugins/openstack/wrappers/network.py
+++ b/rally/plugins/openstack/wrappers/network.py
@@ -18,7 +18,6 @@ import abc
 import netaddr
 import six
 
-from rally.common.i18n import _
 from rally.common import logging
 from rally.common import utils
 from rally import consts
@@ -44,13 +43,13 @@ def generate_cidr(start_cidr="10.2.0.0/24"):
     :returns: next available CIDR str
     """
     cidr = str(netaddr.IPNetwork(start_cidr).next(next(cidr_incr)))
-    LOG.debug("CIDR generated: %s", cidr)
+    LOG.debug("CIDR generated: %s" % cidr)
     return cidr
 
 
 class NetworkWrapperException(exceptions.RallyException):
     error_code = 532
-    msg_fmt = _("%(message)s")
+    msg_fmt = "%(message)s"
 
 
 @six.add_metaclass(abc.ABCMeta)
@@ -373,7 +372,7 @@ class NeutronWrapper(NetworkWrapper):
         if any(ext.get("alias") == extension for ext in extensions):
             return True, ""
 
-        return False, _("Neutron driver does not support %s") % (extension)
+        return False, "Neutron driver does not support %s" % extension
 
 
 def wrap(clients, owner, config=None):
@@ -396,4 +395,4 @@ def wrap(clients, owner, config=None):
 
     if consts.Service.NEUTRON in services.values():
         return NeutronWrapper(clients, owner, config=config)
-    LOG.warning(_("NovaNetworkWrapper is deprecated since 0.9.0"))
+    LOG.warning("NovaNetworkWrapper is deprecated since 0.9.0")
diff --git a/tests/unit/plugins/openstack/cleanup/test_manager.py b/tests/unit/plugins/openstack/cleanup/test_manager.py
index 1a4d892378..bcff72097a 100644
--- a/tests/unit/plugins/openstack/cleanup/test_manager.py
+++ b/tests/unit/plugins/openstack/cleanup/test_manager.py
@@ -92,7 +92,7 @@ class SeekAndDestroyTestCase(test.TestCase):
 
         mock_resource.delete.assert_called_once_with()
         self.assertEqual(4, mock_resource.is_deleted.call_count)
-        self.assertEqual(5, mock_log.warning.call_count)
+        self.assertEqual(1, mock_log.warning.call_count)
         self.assertEqual(4, mock_log.exception.call_count)
 
     def _manager(self, list_side_effect, **kw):
diff --git a/tests/unit/plugins/openstack/context/glance/test_images.py b/tests/unit/plugins/openstack/context/glance/test_images.py
index 6569ac424f..d89654a036 100644
--- a/tests/unit/plugins/openstack/context/glance/test_images.py
+++ b/tests/unit/plugins/openstack/context/glance/test_images.py
@@ -169,15 +169,16 @@ class ImageGeneratorTestCase(test.ScenarioTestCase):
             min_disk=d_min_disk,
             min_ram=d_min_ram
         )
-        self.assertEqual(
-            [mock.call("The 'image_type' argument is deprecated since "
-                       "Rally 0.10.0, use disk_format arguments instead."),
-             mock.call("The 'image_container' argument is deprecated since "
-                       "Rally 0.10.0; use container_format arguments instead"),
-             mock.call("The 'image_args' argument is deprecated since "
-                       "Rally 0.10.0; specify exact arguments in a root "
-                       "section of context instead.")],
-            mock_log.warning.call_args_list)
+        expected_warns = [
+            mock.call("The 'image_type' argument is deprecated since "
+                      "Rally 0.10.0, use disk_format argument instead"),
+            mock.call("The 'image_container' argument is deprecated since "
+                      "Rally 0.10.0; use container_format argument instead"),
+            mock.call("The 'image_args' argument is deprecated since "
+                      "Rally 0.10.0; specify arguments in a root "
+                      "section of context instead")]
+
+        self.assertEqual(expected_warns, mock_log.warning.call_args_list)
 
         mock_image.return_value.create_image.reset_mock()
         mock_log.warning.reset_mock()
@@ -211,15 +212,7 @@ class ImageGeneratorTestCase(test.ScenarioTestCase):
         )
         # No matter will be deprecated arguments used or not, if they are
         # specified, warning message should be printed.
-        self.assertEqual(
-            [mock.call("The 'image_type' argument is deprecated since "
-                       "Rally 0.10.0, use disk_format arguments instead."),
-             mock.call("The 'image_container' argument is deprecated since "
-                       "Rally 0.10.0; use container_format arguments instead"),
-             mock.call("The 'image_args' argument is deprecated since "
-                       "Rally 0.10.0; specify exact arguments in a root "
-                       "section of context instead.")],
-            mock_log.warning.call_args_list)
+        self.assertEqual(expected_warns, mock_log.warning.call_args_list)
 
     @ddt.data(
         {"admin": True},