Bump hacking

hacking 3.0.x is quite old. Bump it to the latest available version.

Change-Id: I8d87fed6afe5988678c64090af261266d1ca20e6
This commit is contained in:
Takashi Kajinami
2024-09-22 23:54:36 +09:00
parent a9dc3794a6
commit 566a830f64
23 changed files with 71 additions and 78 deletions

View File

@@ -91,8 +91,8 @@ class DataSourceBase(object):
except Exception as e:
LOG.exception(e)
self.query_retry_reset(e)
LOG.warning("Retry {0} of {1} while retrieving metrics retry "
"in {2} seconds".format(i+1, num_retries, timeout))
LOG.warning("Retry %d of %d while retrieving metrics retry "
"in %d seconds", i+1, num_retries, timeout)
time.sleep(timeout)
@abc.abstractmethod

View File

@@ -90,8 +90,8 @@ class GnocchiHelper(base.DataSourceBase):
**kwargs)
if not resources:
LOG.warning("The {0} resource {1} could not be "
"found".format(self.NAME, resource_id))
LOG.warning("The %s resource %s could not be found",
self.NAME, resource_id)
return
resource_id = resources[0]['id']
@@ -99,7 +99,7 @@ class GnocchiHelper(base.DataSourceBase):
if meter_name == "instance_cpu_usage":
if resource_type != "instance":
LOG.warning("Unsupported resource type for metric "
"'instance_cpu_usage': ", resource_type)
"'instance_cpu_usage': %s", resource_type)
return
# The "cpu_util" gauge (percentage) metric has been removed.
@@ -172,8 +172,8 @@ class GnocchiHelper(base.DataSourceBase):
**kwargs)
if not resources:
LOG.warning("The {0} resource {1} could not be "
"found".format(self.NAME, resource_id))
LOG.warning("The %s resource %s could not be found",
self.NAME, resource_id)
return
resource_id = resources[0]['id']

View File

@@ -158,8 +158,9 @@ class GrafanaHelper(base.DataSourceBase):
try:
self.METRIC_MAP[meter_name]
except KeyError:
LOG.error("Metric: {0} does not appear in the current Grafana "
"metric map".format(meter_name))
LOG.error(
"Metric: %s does not appear in the current Grafana metric map",
meter_name)
raise exception.MetricNotAvailable(metric=meter_name)
db = self.METRIC_MAP[meter_name]['db']
@@ -184,7 +185,7 @@ class GrafanaHelper(base.DataSourceBase):
resp = self.query_retry(self._request, **kwargs)
if not resp:
LOG.warning("Datasource {0} is not available.".format(self.NAME))
LOG.warning("Datasource %s is not available.", self.NAME)
return
result = translator.extract_result(resp.content)

View File

@@ -57,8 +57,8 @@ class InfluxDBGrafanaTranslator(BaseGrafanaTranslator):
resource = self._extract_attribute(
data['resource'], data['attribute'])
except AttributeError:
LOG.error("Resource: {0} does not contain attribute {1}".format(
data['resource'], data['attribute']))
LOG.error("Resource: %s does not contain attribute %s",
data['resource'], data['attribute'])
raise
# Granularity is optional if it is None the minimal value for InfluxDB
@@ -82,7 +82,7 @@ class InfluxDBGrafanaTranslator(BaseGrafanaTranslator):
index_aggregate = result['columns'].index(self._data['aggregate'])
return result['values'][0][index_aggregate]
except KeyError:
LOG.error("Could not extract {0} for the resource: {1}".format(
self._data['metric'], self._data['resource']))
LOG.error("Could not extract %s for the resource: %s",
self._data['metric'], self._data['resource'])
raise exception.NoSuchMetricForHost(
metric=self._data['metric'], host=self._data['resource'])

View File

@@ -37,8 +37,8 @@ class DataSourceManager(object):
(mon.MonascaHelper.NAME, mon.MonascaHelper.METRIC_MAP),
(graf.GrafanaHelper.NAME, graf.GrafanaHelper.METRIC_MAP),
])
"""Dictionary with all possible datasources, dictionary order is the default
order for attempting to use datasources
"""Dictionary with all possible datasources, dictionary order is
the default order for attempting to use datasources
"""
def __init__(self, config=None, osc=None):
@@ -127,8 +127,9 @@ class DataSourceManager(object):
if (metric not in self.metric_map[datasource] or
self.metric_map[datasource].get(metric) is None):
no_metric = True
LOG.warning("Datasource: {0} could not be used due to "
"metric: {1}".format(datasource, metric))
LOG.warning(
"Datasource: %s could not be used due to metric: %s",
datasource, metric)
break
if not no_metric:
# Try to use a specific datasource but attempt additional

View File

@@ -216,9 +216,9 @@ class BaseModelBuilder(object):
except Exception as e:
LOG.exception(e)
self.call_retry_reset(e)
LOG.warning("Retry {0} of {1}, error while calling service "
"retry in {2} seconds".format(i+1, num_retries,
timeout))
LOG.warning("Retry %d of %d, error while calling service "
"retry in %s seconds",
i+1, num_retries, timeout)
time.sleep(timeout)
raise

View File

@@ -274,7 +274,7 @@ class NovaModelBuilder(base.BaseModelBuilder):
instances = getattr(node_info, "servers", None)
# Do not submit job if there are no instances on compute node
if instances is None:
LOG.info("No instances on compute_node: {0}".format(node_info))
LOG.info("No instances on compute_node: %s", node_info)
return
future_instances.append(
self.executor.submit(
@@ -330,7 +330,7 @@ class NovaModelBuilder(base.BaseModelBuilder):
self.nova_helper.get_compute_node_by_name,
node, servers=True, detailed=True)
for node in compute_nodes]
LOG.debug("submitted {0} jobs".format(len(compute_nodes)))
LOG.debug("submitted %d jobs", len(compute_nodes))
# Futures will concurrently be added, only safe with CPython GIL
future_instances = []
@@ -427,7 +427,7 @@ class NovaModelBuilder(base.BaseModelBuilder):
def add_instance_node(self, node, instances):
if instances is None:
LOG.info("no instances on compute_node: {0}".format(node))
LOG.info("no instances on compute_node: %s", node)
return
host = node.service["host"]
compute_node = self.model.get_node_by_uuid(node.id)

View File

@@ -180,8 +180,8 @@ class NoisyNeighbor(base.NoisyNeighborBaseStrategy):
for potential_noisy_instance in (
instance_priority_list_reverse):
if(potential_noisy_instance ==
potential_priority_instance):
if (potential_noisy_instance ==
potential_priority_instance):
loop_break_flag = True
break

View File

@@ -205,7 +205,7 @@ class UniformAirflow(base.BaseStrategy):
host = nodemap['node']
if 'cores_used' not in nodemap:
# calculate the available resources
nodemap['cores_used'], nodemap['mem_used'],\
nodemap['cores_used'], nodemap['mem_used'], \
nodemap['disk_used'] = self.calculate_used_resource(
host)
cores_available = (host.vcpus -