Add a hacking rule for string interpolation in logging calls

String interpolation should be delayed to be handled by
the logging code, rather than being done at the point
of the logging call.
See the oslo i18n guideline
* https://docs.openstack.org/oslo.i18n/latest/user/guidelines.html#adding-variables-to-log-messages
and
* https://github.com/openstack-dev/hacking/blob/master/hacking/checks/other.py#L39
Closes-Bug: #1596829

Change-Id: Ibba5791669c137be1483805db657beb907030227
This commit is contained in:
ForestLee
2017-07-17 19:03:02 +08:00
committed by Alexander Chadin
parent cb8d1a98d6
commit f607ae8ec0
16 changed files with 160 additions and 140 deletions

View File

@@ -48,7 +48,7 @@ class AuditEndpoint(object):
self._oneshot_handler.execute(audit, context)
def trigger_audit(self, context, audit_uuid):
LOG.debug("Trigger audit %s" % audit_uuid)
LOG.debug("Trigger audit %s", audit_uuid)
self.executor.submit(self.do_trigger_audit,
context,
audit_uuid)

View File

@@ -255,7 +255,7 @@ class CapacityNotificationEndpoint(CinderNotification):
ctxt.request_id = metadata['message_id']
ctxt.project_domain = event_type
LOG.info("Event '%(event)s' received from %(publisher)s "
"with metadata %(metadata)s" %
"with metadata %(metadata)s",
dict(event=event_type,
publisher=publisher_id,
metadata=metadata))
@@ -286,7 +286,7 @@ class VolumeCreateEnd(VolumeNotificationEndpoint):
ctxt.request_id = metadata['message_id']
ctxt.project_domain = event_type
LOG.info("Event '%(event)s' received from %(publisher)s "
"with metadata %(metadata)s" %
"with metadata %(metadata)s",
dict(event=event_type,
publisher=publisher_id,
metadata=metadata))
@@ -311,7 +311,7 @@ class VolumeUpdateEnd(VolumeNotificationEndpoint):
ctxt.request_id = metadata['message_id']
ctxt.project_domain = event_type
LOG.info("Event '%(event)s' received from %(publisher)s "
"with metadata %(metadata)s" %
"with metadata %(metadata)s",
dict(event=event_type,
publisher=publisher_id,
metadata=metadata))
@@ -369,7 +369,7 @@ class VolumeDeleteEnd(VolumeNotificationEndpoint):
ctxt.request_id = metadata['message_id']
ctxt.project_domain = event_type
LOG.info("Event '%(event)s' received from %(publisher)s "
"with metadata %(metadata)s" %
"with metadata %(metadata)s",
dict(event=event_type,
publisher=publisher_id,
metadata=metadata))

View File

@@ -229,7 +229,7 @@ class ServiceUpdated(VersionedNotificationEndpoint):
ctxt.request_id = metadata['message_id']
ctxt.project_domain = event_type
LOG.info("Event '%(event)s' received from %(publisher)s "
"with metadata %(metadata)s" %
"with metadata %(metadata)s",
dict(event=event_type,
publisher=publisher_id,
metadata=metadata))
@@ -275,7 +275,7 @@ class InstanceCreated(VersionedNotificationEndpoint):
ctxt.request_id = metadata['message_id']
ctxt.project_domain = event_type
LOG.info("Event '%(event)s' received from %(publisher)s "
"with metadata %(metadata)s" %
"with metadata %(metadata)s",
dict(event=event_type,
publisher=publisher_id,
metadata=metadata))
@@ -310,7 +310,7 @@ class InstanceUpdated(VersionedNotificationEndpoint):
ctxt.request_id = metadata['message_id']
ctxt.project_domain = event_type
LOG.info("Event '%(event)s' received from %(publisher)s "
"with metadata %(metadata)s" %
"with metadata %(metadata)s",
dict(event=event_type,
publisher=publisher_id,
metadata=metadata))
@@ -337,7 +337,7 @@ class InstanceDeletedEnd(VersionedNotificationEndpoint):
ctxt.request_id = metadata['message_id']
ctxt.project_domain = event_type
LOG.info("Event '%(event)s' received from %(publisher)s "
"with metadata %(metadata)s" %
"with metadata %(metadata)s",
dict(event=event_type,
publisher=publisher_id,
metadata=metadata))
@@ -372,7 +372,7 @@ class LegacyInstanceUpdated(UnversionedNotificationEndpoint):
ctxt.request_id = metadata['message_id']
ctxt.project_domain = event_type
LOG.info("Event '%(event)s' received from %(publisher)s "
"with metadata %(metadata)s" %
"with metadata %(metadata)s",
dict(event=event_type,
publisher=publisher_id,
metadata=metadata))
@@ -399,7 +399,7 @@ class LegacyInstanceCreatedEnd(UnversionedNotificationEndpoint):
ctxt.request_id = metadata['message_id']
ctxt.project_domain = event_type
LOG.info("Event '%(event)s' received from %(publisher)s "
"with metadata %(metadata)s" %
"with metadata %(metadata)s",
dict(event=event_type,
publisher=publisher_id,
metadata=metadata))
@@ -426,7 +426,7 @@ class LegacyInstanceDeletedEnd(UnversionedNotificationEndpoint):
ctxt.request_id = metadata['message_id']
ctxt.project_domain = event_type
LOG.info("Event '%(event)s' received from %(publisher)s "
"with metadata %(metadata)s" %
"with metadata %(metadata)s",
dict(event=event_type,
publisher=publisher_id,
metadata=metadata))
@@ -459,7 +459,7 @@ class LegacyLiveMigratedEnd(UnversionedNotificationEndpoint):
ctxt.request_id = metadata['message_id']
ctxt.project_domain = event_type
LOG.info("Event '%(event)s' received from %(publisher)s "
"with metadata %(metadata)s" %
"with metadata %(metadata)s",
dict(event=event_type,
publisher=publisher_id,
metadata=metadata))
@@ -486,7 +486,7 @@ class LegacyInstanceResizeConfirmEnd(UnversionedNotificationEndpoint):
ctxt.request_id = metadata['message_id']
ctxt.project_domain = event_type
LOG.info("Event '%(event)s' received from %(publisher)s "
"with metadata %(metadata)s" %
"with metadata %(metadata)s",
dict(event=event_type,
publisher=publisher_id,
metadata=metadata))
@@ -513,7 +513,7 @@ class LegacyInstanceRebuildEnd(UnversionedNotificationEndpoint):
ctxt.request_id = metadata['message_id']
ctxt.project_domain = event_type
LOG.info("Event '%(event)s' received from %(publisher)s "
"with metadata %(metadata)s" %
"with metadata %(metadata)s",
dict(event=event_type,
publisher=publisher_id,
metadata=metadata))

View File

@@ -91,16 +91,16 @@ def _reload_scoring_engines(refresh=False):
for name in engines.keys():
se_impl = default.DefaultScoringLoader().load(name)
LOG.debug("Found Scoring Engine plugin: %s" % se_impl.get_name())
LOG.debug("Found Scoring Engine plugin: %s", se_impl.get_name())
_scoring_engine_map[se_impl.get_name()] = se_impl
engine_containers = \
default.DefaultScoringContainerLoader().list_available()
for container_id, container_cls in engine_containers.items():
LOG.debug("Found Scoring Engine container plugin: %s" %
LOG.debug("Found Scoring Engine container plugin: %s",
container_id)
for se in container_cls.get_scoring_engine_list():
LOG.debug("Found Scoring Engine plugin: %s" %
LOG.debug("Found Scoring Engine plugin: %s",
se.get_name())
_scoring_engine_map[se.get_name()] = se

View File

@@ -277,7 +277,7 @@ class BasicConsolidation(base.ServerConsolidationBaseStrategy):
resource_id = "%s_%s" % (node.uuid, node.hostname)
LOG.error(
"No values returned by %(resource_id)s "
"for %(metric_name)s" % dict(
"for %(metric_name)s", dict(
resource_id=resource_id,
metric_name=self.METRIC_NAMES[
self.config.datasource]['host_cpu_usage']))
@@ -297,7 +297,7 @@ class BasicConsolidation(base.ServerConsolidationBaseStrategy):
if instance_cpu_utilization is None:
LOG.error(
"No values returned by %(resource_id)s "
"for %(metric_name)s" % dict(
"for %(metric_name)s", dict(
resource_id=instance.uuid,
metric_name=self.METRIC_NAMES[
self.config.datasource]['instance_cpu_usage']))

View File

@@ -199,10 +199,10 @@ class NoisyNeighbor(base.NoisyNeighborBaseStrategy):
hosts_need_release[node.uuid] = {
'priority_vm': potential_priority_instance,
'noisy_vm': potential_noisy_instance}
LOG.debug("Priority VM found: %s" % (
potential_priority_instance.uuid))
LOG.debug("Noisy VM found: %s" % (
potential_noisy_instance.uuid))
LOG.debug("Priority VM found: %s",
potential_priority_instance.uuid)
LOG.debug("Noisy VM found: %s",
potential_noisy_instance.uuid)
loop_break_flag = True
break

View File

@@ -232,7 +232,8 @@ class OutletTempControl(base.ThermalOptimizationBaseStrategy):
LOG.warning("%s: no outlet temp data", resource_id)
continue
LOG.debug("%s: outlet temperature %f" % (resource_id, outlet_temp))
LOG.debug("%(resource)s: outlet temperature %(temp)f",
{'resource': resource_id, 'temp': outlet_temp})
instance_data = {'node': node, 'outlet_temp': outlet_temp}
if outlet_temp >= self.threshold:
# mark the node to release resources

View File

@@ -375,7 +375,8 @@ class UniformAirflow(base.BaseStrategy):
LOG.warning("%s: no airflow data", resource_id)
continue
LOG.debug("%s: airflow %f" % (resource_id, airflow))
LOG.debug("%(resource)s: airflow %(airflow)f",
{'resource': resource_id, 'airflow': airflow})
nodemap = {'node': node, 'airflow': airflow}
if airflow >= self.threshold_airflow:
# mark the node to release resources

View File

@@ -191,7 +191,7 @@ class VMWorkloadConsolidation(base.ServerConsolidationBaseStrategy):
return instance.state.value
else:
LOG.error('Unexpected instance state type, '
'state=%(state)s, state_type=%(st)s.' %
'state=%(state)s, state_type=%(st)s.',
dict(state=instance.state,
st=type(instance.state)))
raise exception.WatcherException
@@ -207,7 +207,7 @@ class VMWorkloadConsolidation(base.ServerConsolidationBaseStrategy):
return node.status.value
else:
LOG.error('Unexpected node status type, '
'status=%(status)s, status_type=%(st)s.' %
'status=%(status)s, status_type=%(st)s.',
dict(status=node.status,
st=type(node.status)))
raise exception.WatcherException
@@ -256,7 +256,7 @@ class VMWorkloadConsolidation(base.ServerConsolidationBaseStrategy):
# migration mechanism to move non active VMs.
LOG.error(
'Cannot live migrate: instance_uuid=%(instance_uuid)s, '
'state=%(instance_state)s.' % dict(
'state=%(instance_state)s.', dict(
instance_uuid=instance.uuid,
instance_state=instance_state_str))
return

View File

@@ -203,7 +203,7 @@ class WorkloadStabilization(base.WorkloadStabilizationBaseStrategy):
if avg_meter is None:
LOG.warning(
"No values returned by %(resource_id)s "
"for %(metric_name)s" % dict(
"for %(metric_name)s", dict(
resource_id=instance.uuid, metric_name=meter))
return
if meter == 'cpu_util':
@@ -376,12 +376,12 @@ class WorkloadStabilization(base.WorkloadStabilizationBaseStrategy):
normalized_load = self.normalize_hosts_load(hosts_load)
for metric in self.metrics:
metric_sd = self.get_sd(normalized_load, metric)
LOG.info("Standard deviation for %s is %s."
% (metric, metric_sd))
LOG.info("Standard deviation for %s is %s.",
metric, metric_sd)
if metric_sd > float(self.thresholds[metric]):
LOG.info("Standard deviation of %s exceeds"
" appropriate threshold %s."
% (metric, metric_sd))
" appropriate threshold %s.",
metric, metric_sd)
return self.simulate_migrations(hosts_load)
def add_migration(self,

View File

@@ -312,7 +312,7 @@ class ZoneMigration(base.ZoneMigrationBaseStrategy):
else:
self.instances_migration(targets, action_counter)
LOG.debug("action total: %s, pools: %s, nodes %s " % (
LOG.debug("action total: %s, pools: %s, nodes %s ",
          action_counter.total_count,
          action_counter.per_pool_count,
          action_counter.per_node_count)
@@ -413,13 +413,13 @@ class ZoneMigration(base.ZoneMigrationBaseStrategy):
pool = getattr(volume, 'os-vol-host-attr:host')
if action_counter.is_pool_max(pool):
LOG.debug("%s has objects to be migrated, but it has"
" reached the limit of parallelization." % pool)
" reached the limit of parallelization.", pool)
continue
src_type = volume.volume_type
dst_pool, dst_type = self.get_dst_pool_and_type(pool, src_type)
LOG.debug(src_type)
LOG.debug("%s %s" % (dst_pool, dst_type))
LOG.debug("%s %s", dst_pool, dst_type)
if self.is_available(volume):
if src_type == dst_type:
@@ -448,7 +448,7 @@ class ZoneMigration(base.ZoneMigrationBaseStrategy):
if action_counter.is_node_max(src_node):
LOG.debug("%s has objects to be migrated, but it has"
" reached the limit of parallelization." % src_node)
" reached the limit of parallelization.", src_node)
continue
dst_node = self.get_dst_node(src_node)
@@ -643,7 +643,7 @@ class ActionCounter(object):
if not self.is_total_max() and not self.is_pool_max(pool):
self.per_pool_count[pool] += 1
self.total_count += 1
LOG.debug("total: %s, per_pool: %s" % (
LOG.debug("total: %s, per_pool: %s",
          self.total_count, self.per_pool_count)
return True
return False
@@ -660,7 +660,7 @@ class ActionCounter(object):
if not self.is_total_max() and not self.is_node_max(node):
self.per_node_count[node] += 1
self.total_count += 1
LOG.debug("total: %s, per_node: %s" % (
LOG.debug("total: %s, per_node: %s",
          self.total_count, self.per_node_count)
return True
return False
@@ -679,9 +679,9 @@ class ActionCounter(object):
"""
if pool not in self.per_pool_count:
self.per_pool_count[pool] = 0
LOG.debug("the number of parallel per pool %s is %s " %
LOG.debug("the number of parallel per pool %s is %s ",
pool, self.per_pool_count[pool])
LOG.debug("per pool limit is %s" % self.per_pool_limit)
LOG.debug("per pool limit is %s", self.per_pool_limit)
return self.per_pool_count[pool] >= self.per_pool_limit
def is_node_max(self, node):
@@ -724,7 +724,7 @@ class BaseFilter(object):
for k, v in six.iteritems(targets):
if not self.is_allowed(k):
continue
LOG.debug("filter:%s with the key: %s" % (cond, k))
LOG.debug("filter:%s with the key: %s", cond, k)
targets[k] = self.exec_filter(v, cond)
LOG.debug(targets)
@@ -778,7 +778,7 @@ class ProjectSortFilter(SortMovingToFrontFilter):
"""
project_id = self.get_project_id(item)
LOG.debug("project_id: %s, sort_key: %s" % (project_id, sort_key))
LOG.debug("project_id: %s, sort_key: %s", project_id, sort_key)
return project_id == sort_key
def get_project_id(self, item):
@@ -812,7 +812,7 @@ class ComputeHostSortFilter(SortMovingToFrontFilter):
"""
host = self.get_host(item)
LOG.debug("host: %s, sort_key: %s" % (host, sort_key))
LOG.debug("host: %s, sort_key: %s", host, sort_key)
return host == sort_key
def get_host(self, item):
@@ -840,7 +840,7 @@ class StorageHostSortFilter(SortMovingToFrontFilter):
"""
host = self.get_host(item)
LOG.debug("host: %s, sort_key: %s" % (host, sort_key))
LOG.debug("host: %s, sort_key: %s", host, sort_key)
return host == sort_key
def get_host(self, item):
@@ -867,7 +867,7 @@ class ComputeSpecSortFilter(BaseFilter):
result = items
if sort_key not in self.accept_keys:
LOG.warning("Invalid key is specified: %s" % sort_key)
LOG.warning("Invalid key is specified: %s", sort_key)
else:
result = self.get_sorted_items(items, sort_key)
@@ -912,11 +912,11 @@ class ComputeSpecSortFilter(BaseFilter):
:returns: memory size of item
"""
LOG.debug("item: %s, flavors: %s" % (item, flavors))
LOG.debug("item: %s, flavors: %s", item, flavors)
for flavor in flavors:
LOG.debug("item.flavor: %s, flavor: %s" % (item.flavor, flavor))
LOG.debug("item.flavor: %s, flavor: %s", item.flavor, flavor)
if item.flavor.get('id') == flavor.id:
LOG.debug("flavor.ram: %s" % flavor.ram)
LOG.debug("flavor.ram: %s", flavor.ram)
return flavor.ram
def get_vcpu_num(self, item, flavors):
@@ -927,11 +927,11 @@ class ComputeSpecSortFilter(BaseFilter):
:returns: vcpu number of item
"""
LOG.debug("item: %s, flavors: %s" % (item, flavors))
LOG.debug("item: %s, flavors: %s", item, flavors)
for flavor in flavors:
LOG.debug("item.flavor: %s, flavor: %s" % (item.flavor, flavor))
LOG.debug("item.flavor: %s, flavor: %s", item.flavor, flavor)
if item.flavor.get('id') == flavor.id:
LOG.debug("flavor.vcpus: %s" % flavor.vcpus)
LOG.debug("flavor.vcpus: %s", flavor.vcpus)
return flavor.vcpus
def get_disk_size(self, item, flavors):
@@ -942,11 +942,11 @@ class ComputeSpecSortFilter(BaseFilter):
:returns: disk size of item
"""
LOG.debug("item: %s, flavors: %s" % (item, flavors))
LOG.debug("item: %s, flavors: %s", item, flavors)
for flavor in flavors:
LOG.debug("item.flavor: %s, flavor: %s" % (item.flavor, flavor))
LOG.debug("item.flavor: %s, flavor: %s", item.flavor, flavor)
if item.flavor.get('id') == flavor.id:
LOG.debug("flavor.disk: %s" % flavor.disk)
LOG.debug("flavor.disk: %s", flavor.disk)
return flavor.disk
@@ -960,7 +960,7 @@ class StorageSpecSortFilter(BaseFilter):
result = items
if sort_key not in self.accept_keys:
LOG.warning("Invalid key is specified: %s" % sort_key)
LOG.warning("Invalid key is specified: %s", sort_key)
return result
if sort_key == 'created_at':