Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

mgr: replace iteritems with six.iteritems #22870

Merged
merged 7 commits into from Jul 6, 2018
63 changes: 32 additions & 31 deletions src/pybind/mgr/balancer/module.py
Expand Up @@ -8,6 +8,7 @@
import json
import math
import random
import six
import time
from mgr_module import MgrModule, CommandResult
from threading import Event
Expand Down Expand Up @@ -38,13 +39,13 @@ def __init__(self, osdmap, pg_dump, desc=''):
self.pg_up_by_poolid = {}
for poolid in self.poolids:
self.pg_up_by_poolid[poolid] = osdmap.map_pool_pgs_up(poolid)
for a,b in self.pg_up_by_poolid[poolid].iteritems():
for a,b in six.iteritems(self.pg_up_by_poolid[poolid]):
self.pg_up[a] = b

def calc_misplaced_from(self, other_ms):
num = len(other_ms.pg_up)
misplaced = 0
for pgid, before in other_ms.pg_up.iteritems():
for pgid, before in six.iteritems(other_ms.pg_up):
if before != self.pg_up.get(pgid, []):
misplaced += 1
if num > 0:
Expand Down Expand Up @@ -81,10 +82,10 @@ def show(self):
if len(self.compat_ws) and \
not CRUSHMap.have_default_choose_args(self.initial.crush_dump):
ls.append('ceph osd crush weight-set create-compat')
for osd, weight in self.compat_ws.iteritems():
for osd, weight in six.iteritems(self.compat_ws):
ls.append('ceph osd crush weight-set reweight-compat %s %f' %
(osd, weight))
for osd, weight in self.osd_weights.iteritems():
for osd, weight in six.iteritems(self.osd_weights):
ls.append('ceph osd reweight osd.%d %f' % (osd, weight))
incdump = self.inc.dump()
for pgid in incdump.get('old_pg_upmap_items', []):
Expand Down Expand Up @@ -160,7 +161,7 @@ def calc_stats(self, count, target, total):
score = 0.0
sum_weight = 0.0

for k, v in count[t].iteritems():
for k, v in six.iteritems(count[t]):
# adjust/normalize by weight
if target[k]:
adjusted = float(v) / target[k] / float(num)
Expand Down Expand Up @@ -481,12 +482,12 @@ def calc_eval(self, ms, pools):
weight_map = ms.crush.get_take_weight_osd_map(rootid)
adjusted_map = {
osd: cw * osd_weight[osd]
for osd,cw in weight_map.iteritems() if osd in osd_weight and cw > 0
for osd,cw in six.iteritems(weight_map) if osd in osd_weight and cw > 0
}
sum_w = sum(adjusted_map.values())
assert len(adjusted_map) == 0 or sum_w > 0
pe.target_by_root[root] = { osd: w / sum_w
for osd,w in adjusted_map.iteritems() }
for osd,w in six.iteritems(adjusted_map) }
actual_by_root[root] = {
'pgs': {},
'objects': {},
Expand All @@ -506,7 +507,7 @@ def calc_eval(self, ms, pools):
self.log.debug('target_by_root %s' % pe.target_by_root)

# pool and root actual
for pool, pi in pool_info.iteritems():
for pool, pi in six.iteritems(pool_info):
poolid = pi['pool']
pm = ms.pg_up_by_poolid[poolid]
pgs = 0
Expand All @@ -520,7 +521,7 @@ def calc_eval(self, ms, pools):
pgs_by_osd[osd] = 0
objects_by_osd[osd] = 0
bytes_by_osd[osd] = 0
for pgid, up in pm.iteritems():
for pgid, up in six.iteritems(pm):
for osd in [int(osd) for osd in up]:
if osd == CRUSHMap.ITEM_NONE:
continue
Expand All @@ -546,29 +547,29 @@ def calc_eval(self, ms, pools):
pe.count_by_pool[pool] = {
'pgs': {
k: v
for k, v in pgs_by_osd.iteritems()
for k, v in six.iteritems(pgs_by_osd)
},
'objects': {
k: v
for k, v in objects_by_osd.iteritems()
for k, v in six.iteritems(objects_by_osd)
},
'bytes': {
k: v
for k, v in bytes_by_osd.iteritems()
for k, v in six.iteritems(bytes_by_osd)
},
}
pe.actual_by_pool[pool] = {
'pgs': {
k: float(v) / float(max(pgs, 1))
for k, v in pgs_by_osd.iteritems()
for k, v in six.iteritems(pgs_by_osd)
},
'objects': {
k: float(v) / float(max(objects, 1))
for k, v in objects_by_osd.iteritems()
for k, v in six.iteritems(objects_by_osd)
},
'bytes': {
k: float(v) / float(max(bytes, 1))
for k, v in bytes_by_osd.iteritems()
for k, v in six.iteritems(bytes_by_osd)
},
}
pe.total_by_pool[pool] = {
Expand All @@ -580,29 +581,29 @@ def calc_eval(self, ms, pools):
pe.count_by_root[root] = {
'pgs': {
k: float(v)
for k, v in actual_by_root[root]['pgs'].iteritems()
for k, v in six.iteritems(actual_by_root[root]['pgs'])
},
'objects': {
k: float(v)
for k, v in actual_by_root[root]['objects'].iteritems()
for k, v in six.iteritems(actual_by_root[root]['objects'])
},
'bytes': {
k: float(v)
for k, v in actual_by_root[root]['bytes'].iteritems()
for k, v in six.iteritems(actual_by_root[root]['bytes'])
},
}
pe.actual_by_root[root] = {
'pgs': {
k: float(v) / float(max(pe.total_by_root[root]['pgs'], 1))
for k, v in actual_by_root[root]['pgs'].iteritems()
for k, v in six.iteritems(actual_by_root[root]['pgs'])
},
'objects': {
k: float(v) / float(max(pe.total_by_root[root]['objects'], 1))
for k, v in actual_by_root[root]['objects'].iteritems()
for k, v in six.iteritems(actual_by_root[root]['objects'])
},
'bytes': {
k: float(v) / float(max(pe.total_by_root[root]['bytes'], 1))
for k, v in actual_by_root[root]['bytes'].iteritems()
for k, v in six.iteritems(actual_by_root[root]['bytes'])
},
}
self.log.debug('actual_by_pool %s' % pe.actual_by_pool)
Expand All @@ -614,7 +615,7 @@ def calc_eval(self, ms, pools):
b,
pe.target_by_root[a],
pe.total_by_root[a]
) for a, b in pe.count_by_root.iteritems()
) for a, b in six.iteritems(pe.count_by_root)
}
self.log.debug('stats_by_root %s' % pe.stats_by_root)

Expand All @@ -630,8 +631,8 @@ def calc_eval(self, ms, pools):

# total score is just average of normalized stddevs
pe.score = 0.0
for r, vs in pe.score_by_root.iteritems():
for k, v in vs.iteritems():
for r, vs in six.iteritems(pe.score_by_root):
for k, v in six.iteritems(vs):
pe.score += v
pe.score /= 3 * len(roots)
return pe
Expand Down Expand Up @@ -749,14 +750,14 @@ def do_crush_compat(self, plan):
# get current osd reweights
orig_osd_weight = { a['osd']: a['weight']
for a in ms.osdmap_dump.get('osds',[]) }
reweighted_osds = [ a for a,b in orig_osd_weight.iteritems()
reweighted_osds = [ a for a,b in six.iteritems(orig_osd_weight)
if b < 1.0 and b > 0.0 ]

# get current compat weight-set weights
orig_ws = self.get_compat_weight_set_weights(ms)
if not orig_ws:
return -errno.EAGAIN, 'compat weight-set not available'
orig_ws = { a: b for a, b in orig_ws.iteritems() if a >= 0 }
orig_ws = { a: b for a, b in six.iteritems(orig_ws) if a >= 0 }

# Make sure roots don't overlap their devices. If so, we
# can't proceed.
Expand All @@ -765,7 +766,7 @@ def do_crush_compat(self, plan):
visited = {}
overlap = {}
root_ids = {}
for root, wm in pe.target_by_root.iteritems():
for root, wm in six.iteritems(pe.target_by_root):
for osd in wm.iterkeys():
if osd in visited:
if osd not in overlap:
Expand Down Expand Up @@ -837,7 +838,7 @@ def do_crush_compat(self, plan):

# normalize weights under this root
root_weight = crush.get_item_weight(pe.root_ids[root])
root_sum = sum(b for a,b in next_ws.iteritems()
root_sum = sum(b for a,b in six.iteritems(next_ws)
if a in target.keys())
if root_sum > 0 and root_weight > 0:
factor = root_sum / root_weight
Expand Down Expand Up @@ -894,7 +895,7 @@ def do_crush_compat(self, plan):
if best_pe.score < pe.score + fudge:
self.log.info('Success, score %f -> %f', pe.score, best_pe.score)
plan.compat_ws = best_ws
for osd, w in best_ow.iteritems():
for osd, w in six.iteritems(best_ow):
if w != orig_osd_weight[osd]:
self.log.debug('osd.%d reweight %f', osd, w)
plan.osd_weights[osd] = w
Expand Down Expand Up @@ -981,7 +982,7 @@ def execute(self, plan):
self.log.error('Error creating compat weight-set')
return r, outs

for osd, weight in plan.compat_ws.iteritems():
for osd, weight in six.iteritems(plan.compat_ws):
self.log.info('ceph osd crush weight-set reweight-compat osd.%d %f',
osd, weight)
result = CommandResult('')
Expand All @@ -995,7 +996,7 @@ def execute(self, plan):

# new_weight
reweightn = {}
for osd, weight in plan.osd_weights.iteritems():
for osd, weight in six.iteritems(plan.osd_weights):
reweightn[str(osd)] = str(int(weight * float(0x10000)))
if len(reweightn):
self.log.info('ceph osd reweightn %s', reweightn)
Expand Down
5 changes: 3 additions & 2 deletions src/pybind/mgr/crash/module.py
Expand Up @@ -2,6 +2,7 @@
import datetime
import errno
import json
import six


DATEFMT = '%Y-%m-%d %H:%M:%S.%f'
Expand Down Expand Up @@ -82,7 +83,7 @@ def do_prune(self, cmd, inbuf):

keeptime = datetime.timedelta(days=keep)

for key, meta in self.get_store_prefix('crash/').iteritems():
for key, meta in six.iteritems(self.get_store_prefix('crash/')):
meta = json.loads(meta)
stamp = self.time_from_string(meta['timestamp'])
if stamp <= now - keeptime:
Expand Down Expand Up @@ -116,7 +117,7 @@ def binstr(bindict):
'idlist': list()
}

for key, meta in self.get_store_prefix('crash/').iteritems():
for key, meta in six.iteritems(self.get_store_prefix('crash/')):
total += 1
meta = json.loads(meta)
stamp = self.time_from_string(meta['timestamp'])
Expand Down
7 changes: 4 additions & 3 deletions src/pybind/mgr/influx/module.py
Expand Up @@ -2,6 +2,7 @@
from threading import Event
import json
import errno
import six
import time

from mgr_module import MgrModule
Expand Down Expand Up @@ -149,7 +150,7 @@ def get_pg_summary(self, pool_info):
osd_sum = pg_sum['by_osd']
pool_sum = pg_sum['by_pool']
data = []
for osd_id, stats in osd_sum.iteritems():
for osd_id, stats in six.iteritems(osd_sum):
metadata = self.get_metadata('osd', "%s" % osd_id)
for stat in stats:
point_1 = {
Expand All @@ -165,7 +166,7 @@ def get_pg_summary(self, pool_info):
}
}
data.append(point_1)
for pool_id, stats in pool_sum.iteritems():
for pool_id, stats in six.iteritems(pool_sum):
for stat in stats:
point_2 = {
"measurement": "ceph_pg_summary_pool",
Expand All @@ -188,7 +189,7 @@ def get_daemon_stats(self):

now = datetime.utcnow().isoformat() + 'Z'

for daemon, counters in self.get_all_perf_counters().iteritems():
for daemon, counters in six.iteritems(self.get_all_perf_counters()):
svc_type, svc_id = daemon.split(".", 1)
metadata = self.get_metadata(svc_type, svc_id)

Expand Down
3 changes: 2 additions & 1 deletion src/pybind/mgr/mgr_module.py
Expand Up @@ -3,6 +3,7 @@

import json
import logging
import six
import threading
from collections import defaultdict
import rados
Expand Down Expand Up @@ -141,7 +142,7 @@ def find_takes(self):

def get_take_weight_osd_map(self, root):
    """Return the relative weight of each OSD under a CRUSH ``take`` root.

    :param root: CRUSH root item to query.
    :return: dict mapping int OSD id -> float weight.  The underlying
             call returns string keys; they are normalized to ints here.
    """
    uglymap = self._get_take_weight_osd_map(root)
    # dict.items() iterates fine on both Python 2 and 3, so no
    # compatibility shim (six.iteritems) is required here.
    return {int(k): v for k, v in uglymap.get('weights', {}).items()}

@staticmethod
def have_default_choose_args(dump):
Expand Down
9 changes: 2 additions & 7 deletions src/pybind/mgr/restful/module.py
Expand Up @@ -11,6 +11,7 @@
import tempfile
import threading
import traceback
import six
import socket

from . import common
Expand All @@ -26,12 +27,6 @@
from mgr_module import MgrModule, CommandResult


try:
iteritems = dict.iteritems
except:
iteritems = dict.items


class CannotServe(Exception):
pass

Expand Down Expand Up @@ -270,7 +265,7 @@ def serve(self):
def refresh_keys(self):
    """Reload API keys from the persistent store into ``self.keys``.

    Keys live in the store under the ``keys/`` prefix; the prefix is
    stripped from the in-memory dictionary keys.
    """
    self.keys = {}
    # get_store_prefix may return None when nothing is stored yet.
    rawkeys = self.get_store_prefix('keys/') or {}
    # dict.items() iterates on both Python 2 and 3; no six shim needed.
    for k, v in rawkeys.items():
        self.keys[k[5:]] = v  # strip off the 'keys/' prefix

def _serve(self):
Expand Down
9 changes: 5 additions & 4 deletions src/pybind/mgr/status/module.py
Expand Up @@ -5,9 +5,10 @@

from collections import defaultdict
from prettytable import PrettyTable
import prettytable
import fnmatch
import errno
import fnmatch
import prettytable
import six

from mgr_module import MgrModule

Expand Down Expand Up @@ -187,7 +188,7 @@ def handle_fs_status(self, cmd):
])

# Find the standby replays
for gid_str, daemon_info in mdsmap['info'].iteritems():
for gid_str, daemon_info in six.iteritems(mdsmap['info']):
if daemon_info['state'] != "up:standby-replay":
continue

Expand Down Expand Up @@ -242,7 +243,7 @@ def handle_fs_status(self, cmd):
output += "MDS version: {0}".format(mds_versions.keys()[0])
else:
version_table = PrettyTable(["version", "daemons"])
for version, daemons in mds_versions.iteritems():
for version, daemons in six.iteritems(mds_versions):
version_table.add_row([
version,
", ".join(daemons)
Expand Down
3 changes: 2 additions & 1 deletion src/pybind/mgr/telegraf/module.py
@@ -1,6 +1,7 @@
import errno
import json
import itertools
import six
import socket
import time
from threading import Event
Expand Down Expand Up @@ -102,7 +103,7 @@ def get_pool_stats(self):
}

def get_daemon_stats(self):
for daemon, counters in self.get_all_perf_counters().iteritems():
for daemon, counters in six.iteritems(self.get_all_perf_counters()):
svc_type, svc_id = daemon.split('.', 1)
metadata = self.get_metadata(svc_type, svc_id)

Expand Down