apply most of python futurize stage1 tools for python3 compatibility #23208

Merged · 3 commits · May 23, 2018
@@ -488,9 +488,9 @@ def _customSetattr(obj, attr, val):
- `val`: value of the attribute.
"""

-if type(attr) is tuple and len(attr) > 1:
+if isinstance(attr, tuple) and len(attr) > 1:
_customSetattr(getattr(obj, attr[0]), attr[1:], val)
else:
-if type(attr) is tuple: attr = attr[0]
+if isinstance(attr, tuple): attr = attr[0]
setattr(obj, attr, val)
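Reviewer note: `isinstance` is the right replacement here, and not just stylistically: unlike `type(x) is tuple`, it also accepts tuple subclasses. A minimal sketch of the difference, with a hypothetical value not taken from this PR:

```python
from collections import namedtuple

Point = namedtuple("Point", ["x", "y"])  # a tuple subclass
p = Point(1, 2)

print(type(p) is tuple)      # False: exact-type check rejects subclasses
print(isinstance(p, tuple))  # True: accepts tuple and its subclasses
```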

@@ -6,6 +6,7 @@
import sqlalchemy
import subprocess
import CondCore.Utilities.conddblib as conddb
+from functools import reduce


def create_single_iov_db(inputs, run_number, output_db):
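Python 3 drops the `reduce` builtin, so the new import keeps any later `reduce(...)` calls in this module working on both interpreters. A small usage sketch (hypothetical data, not from `create_single_iov_db`):

```python
from functools import reduce

# Python 3 removed the reduce builtin; importing it from functools
# works on both interpreters.
merged = reduce(lambda acc, d: dict(acc, **d), [{"a": 1}, {"b": 2}], {})
print(merged)  # {'a': 1, 'b': 2}
```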
@@ -1074,7 +1074,7 @@ def merge_strings(strings):
- `strings`: list of strings
"""

-if type(strings) == str:
+if isinstance(strings, str):
return strings
elif len(strings) == 0:
return ""
6 changes: 2 additions & 4 deletions Alignment/MuonAlignment/python/svgfig.py
@@ -2692,8 +2692,7 @@ def compute_miniticks(self, original_ticks):
Normally only used internally.
"""
if len(original_ticks) < 2: original_ticks = ticks(self.low, self.high)
-original_ticks = original_ticks.keys()
-original_ticks.sort()
+original_ticks = sorted(original_ticks.keys())

if self.low > original_ticks[0] + _epsilon or self.high < original_ticks[-1] - _epsilon:
raise ValueError("original_ticks {%g...%g} extend beyond [%g, %g]" % (original_ticks[0], original_ticks[-1], self.low, self.high))
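This is the core pattern of the PR: on Python 3, `dict.keys()` returns a view object with no `sort()` method, while `sorted()` yields a fresh list on both interpreters. A quick illustration with a throwaway dict:

```python
ticks = {0.5: "", 0.0: "0", 1.0: "1"}

# Python 2: ticks.keys() is a list, so .keys() then .sort() worked in place.
# Python 3: AttributeError: 'dict_keys' object has no attribute 'sort'.
positions = sorted(ticks.keys())  # portable: always a new, sorted list
print(positions)                  # [0.0, 0.5, 1.0]
```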
@@ -2746,8 +2745,7 @@ def compute_logticks(self, base, N, format):
if self.low <= x <= self.high: output[x] = label

for i in range(1, len(output)):
-keys = output.keys()
-keys.sort()
+keys = sorted(output.keys())
keys = keys[::i]
values = map(lambda k: output[k], keys)
if len(values) <= N:
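One catch the stage-1 fixers leave behind: two lines below the rewrite, `values = map(...)` flows into `len(values)`, which breaks on Python 3 where `map` returns an iterator. A sketch of the failure and the usual fix (names mirror the hunk, values hypothetical):

```python
output = {1: "a", 2: "b", 3: "c"}
keys = sorted(output.keys())

values = map(lambda k: output[k], keys)
# Python 3: len(values) -> TypeError: object of type 'map' has no len()

values = [output[k] for k in keys]  # portable: materialize a list
print(len(values))                  # 3
```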
6 changes: 2 additions & 4 deletions Alignment/MuonAlignmentAlgorithms/python/gather_cfg.py
@@ -84,11 +84,9 @@
if json_file is not None and json_file != '':
jsonfile=file(json_file, 'r')
jsondict = json.load(jsonfile)
-runs = jsondict.keys()
-runs.sort()
+runs = sorted(jsondict.keys())
for run in runs:
-blocks = jsondict[run]
-blocks.sort()
+blocks = sorted(jsondict[run])
prevblock = [-2,-2]
for lsrange in blocks:
if lsrange[0] == prevblock[1]+1:
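Two stage-2 leftovers are visible in this hunk: the `file()` builtin is gone on Python 3 (`open()` is the portable spelling), and because JSON object keys are strings, `sorted(jsondict.keys())` orders run numbers lexicographically rather than numerically. A hedged sketch of both points (file name illustrative):

```python
import json

jsonfile = open("lumis.json", "r")  # open(), not file(), on Python 3
jsondict = json.load(jsonfile)

# JSON keys are strings, so plain sorted() compares them as text:
print(sorted(["9", "10", "100"]))           # ['10', '100', '9']
print(sorted(["9", "10", "100"], key=int))  # ['9', '10', '100']
```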
3 changes: 1 addition & 2 deletions Alignment/MuonAlignmentAlgorithms/scripts/createTree.py
@@ -123,8 +123,7 @@ def parseDir(dir,label,it1="",itN=""):
print "directory ", dir, "has no ", itN, " in it!!"
return ["problem!!!",""]
res = [label,dir]
-files = os.listdir(dir)
-files.sort()
+files = sorted(os.listdir(dir))
for f in files:
if re.match(".+\.png", f):
if len(it1)>0 and len(itN)>0:
@@ -416,8 +416,7 @@ def getJSONGoodRuns():
ff.close()
#os.system('rm /tmp/runs_and_files_full_of_pink_bunnies')

-uniq_list_of_runs = list(set(list_of_runs))
-uniq_list_of_runs.sort()
+uniq_list_of_runs = sorted(set(list_of_runs))

print "### list of runs with good B field and quality in the dataset: ###"
print uniq_list_of_runs
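Passing the `set` straight to `sorted()` deduplicates and sorts in one step, and drops the intermediate `list(...)` copy the old code made. For example (run numbers illustrative):

```python
list_of_runs = [273158, 273017, 273158, 273730, 273017]

uniq_list_of_runs = sorted(set(list_of_runs))  # dedupe + sort in one pass
print(uniq_list_of_runs)  # [273017, 273158, 273730]
```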
6 changes: 2 additions & 4 deletions Alignment/MuonAlignmentAlgorithms/scripts/plotscripts.py
@@ -401,8 +401,7 @@ def philines(name, window, abscissa):
philine_tlines[-1].Draw()
if "st" in name: # DT labels
philine_labels = []
-edges = edges[:]
-edges.sort()
+edges = sorted(edges[:])
if "st4" in name:
labels = [" 7", " 8", " 9", "14", "10", "11", "12", " 1", " 2", " 3", "13", " 4", " 5", " 6"]
else:
@@ -417,8 +416,7 @@ def philines(name, window, abscissa):
philine_labels[-1].Draw()
if "CSC" in name: # DT labels
philine_labels = []
-edges = edges[:]
-edges.sort()
+edges = sorted(edges[:])
labels = [" 1", " 2", " 3", " 4", " 5", " 6", " 7", " 8", " 9", "10", "11", "12", "13", "14", "15", "16", "17", "18",
"19", "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", "30", "31", "32", "33", "34", "35", "36"]
#else:
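A small nit on both rewrites in this file: `sorted()` already returns a new list, so the defensive `edges[:]` copy is now redundant and `edges = sorted(edges)` would do. The caller's list is untouched either way:

```python
caller_edges = [3, 1, 2]

edges = sorted(caller_edges)  # no [:] needed: sorted() never mutates its input
print(edges)                  # [1, 2, 3]
print(caller_edges)           # [3, 1, 2] -- original order preserved
```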
@@ -85,8 +85,7 @@
empty = False

if not empty:
-keys = byRing.keys()
-keys.sort()
+keys = sorted(byRing.keys())
print "for fitter in process.looper.algoConfig.fitters:"
for ringName in keys:
if len(byRing[ringName]) > 0:
@@ -73,7 +73,7 @@ def getSectionLines(self,section):
except:
pass
if not parsed:
-if type(configItem[1]) == list:
+if isinstance(configItem[1], list):
sectionLines.append('config.%s.%s = %s'%(section,configItem[0],str(configItem[1])))
parsed = True
if not parsed:
4 changes: 2 additions & 2 deletions CalibMuon/DTCalibration/python/Workflow/DTTtrigWorkflow.py
@@ -54,7 +54,7 @@ def prepare_workflow(self):

def get_output_db(self, workflow_mode, command):
output_db_file = self.output_db_dict[workflow_mode]
-if type(output_db_file) == dict:
+if isinstance(output_db_file, dict):
return output_db_file[command]
return output_db_file
####################################################################
@@ -81,7 +81,7 @@ def prepare_timeboxes_check(self):

def prepare_timeboxes_write(self):
self.output_db_file = self.output_db_dict[self.options.workflow_mode]
-if type(self.output_db_dict[self.options.workflow_mode]) == dict:
+if isinstance(self.output_db_dict[self.options.workflow_mode], dict):
self.output_db_file = self.output_db_file[self.options.command]
self.prepare_common_write()
merged_file = os.path.join(self.result_path, self.output_file)
2 changes: 1 addition & 1 deletion CalibMuon/DTCalibration/python/Workflow/DTWorkflow.py
@@ -45,7 +45,7 @@ def check_missing_options(self, requirements_dict):
for option in requirements_dict[self.options.command]:
if not (hasattr(self.options, option)
and ( (getattr(self.options,option))
-or type(getattr(self.options,option)) == bool )):
+or isinstance(getattr(self.options,option), bool) )):
missing_options.append(option)
if len(missing_options) > 0:
err = "The following CLI options are missing"
3 changes: 1 addition & 2 deletions CalibTracker/SiStripChannelGain/test/PCL/Launch.py
@@ -10,8 +10,7 @@
FIRSTRUN=0 #200190

runs = []
-results = commands.getstatusoutput('dbs search --query="find run,sum(block.numevents) where dataset='+DATASET+' and run>='+str(FIRSTRUN)+'"')[1].splitlines()
-results.sort()
+results = sorted(commands.getstatusoutput('dbs search --query="find run,sum(block.numevents) where dataset='+DATASET+' and run>='+str(FIRSTRUN)+'"')[1].splitlines())
for line in results:
linesplit = line.split(' ')
if(len(linesplit)<2):continue
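The `commands` module this script relies on was removed in Python 3; `subprocess.getstatusoutput` is the drop-in replacement for a later stage. A hedged sketch (placeholder dataset, availability of the `dbs` CLI assumed):

```python
import subprocess

# Python 3 replacement for commands.getstatusoutput(...):
status, output = subprocess.getstatusoutput(
    'dbs search --query="find run,sum(block.numevents) '
    'where dataset=/SomeDataset and run>=0"')  # placeholder dataset
results = sorted(output.splitlines())
```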
@@ -30,8 +30,7 @@ def mail(STDruns,AAGruns,cleanUpLog):
else:
runs[run[0]]+=1

-runsOrdered = runs.keys()
-runsOrdered.sort()
+runsOrdered = sorted(runs.keys())

for r in runsOrdered:
message+=" Run %s (%s jobs) \n"%(r,runs[r])
@@ -1,6 +1,7 @@
#!/usr/bin/env python
+from __future__ import absolute_import
import urllib
-import Config
+from . import Config
import string
import os
import sys
@@ -1,10 +1,11 @@
#!/usr/bin/env python
+from __future__ import absolute_import
import os
import sys
import commands
import time
import optparse
-import Config
+from . import Config

usage = 'usage: %prog [options]'
parser = optparse.OptionParser(usage)
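With `from __future__ import absolute_import` in force, a bare `import Config` searches `sys.path` for a top-level `Config` even on Python 2, so the explicit relative form is needed to keep resolving the sibling module. A minimal sketch of the rule (package layout hypothetical):

```python
# Layout (hypothetical): pkg/__init__.py, pkg/Config.py, pkg/tool.py
from __future__ import absolute_import

# import Config       # would now look for a top-level Config on sys.path
from . import Config   # explicit relative import: the sibling pkg/Config.py
```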
12 changes: 4 additions & 8 deletions CalibTracker/SiStripDCS/test/ParseManualO2Olog.py
@@ -730,10 +730,8 @@ def getIOVsInTimeInterval(self,StartTime,StopTime,HistoryDict):
"""
Function that returns the IOV timestamps (ordered) contained in a given interval.
"""
-#Copy the timestamps in a list
-TimeStamps=HistoryDict.keys()[:]
-#Sort them:
-TimeStamps.sort()
+#Copy and sort the timestamps in a list
+TimeStamps=sorted(HistoryDict.keys()[:])
IOVsInTimeInterval=[]
#loop over them:
for timestamp in TimeStamps:
@@ -749,10 +747,8 @@ def getReducedIOVs (self,StartTime,StopTime,HistoryDict,deltaT,maxIOVLength):
"""
deltaTime=datetime.timedelta(seconds=deltaT)
maxSequenceLength=datetime.timedelta(seconds=maxIOVLength)
-#Copy the timestamps in a list:
-TimeStamps=HistoryDict.keys()[:]
-#Sort them:
-TimeStamps.sort()
+#Copy and sort the timestamps in a list:
+TimeStamps=sorted(HistoryDict.keys()[:])
ReducedIOVs=TimeStamps[:]
PreviousTimestamp=TimeStamps[0]
SequenceStart=TimeStamps[0] #Initialization irrelevant see loop
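A follow-up the stage-1 fixers miss: on Python 3, `HistoryDict.keys()` is a view that does not support slicing, so the surviving `[:]` makes both rewritten lines raise a TypeError there; it is also unnecessary, since `sorted()` builds a new list anyway. Sketch:

```python
HistoryDict = {1275000500: "iov_b", 1275000000: "iov_a"}

# Python 3: HistoryDict.keys()[:] -> TypeError ('dict_keys' is not subscriptable)
TimeStamps = sorted(HistoryDict.keys())  # portable, and no copy needed
print(TimeStamps)  # [1275000000, 1275000500]
```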
2 changes: 1 addition & 1 deletion CondCore/PopCon/test/fetchCronLogTail.py
@@ -28,7 +28,7 @@ def readConnection(fileName):
db=""
account=""
connection=""
-while 1:
+while True:
line= f.readline()
line=line.strip()
if line =="":
6 changes: 3 additions & 3 deletions CondCore/Utilities/python/CondDBFW/data_formats.py
@@ -29,7 +29,7 @@ def to_datatables(script):
def new_script(self, connection):
try:
data = script(self, connection)
-if(type(data) == list):
+if(isinstance(data, list)):
data = _json_data_node.make(data)
return to_datatables(data)
except (KeyError, TypeError) as e:
@@ -63,7 +63,7 @@ def _to_array_of_dicts(data):
headers = data.get("headers").data()
data_list = data.get("data").data()
def unicode_to_str(string):
-return str(string) if type(string) == unicode else string
+return str(string) if isinstance(string, unicode) else string
headers = map(unicode_to_str, headers)
def row_to_dict(row):
row = map(unicode_to_str, row)
@@ -75,7 +75,7 @@ def _to_datatables(data):
headers = map(str, data.get(0).data().keys())
new_data = []
for n in range(0, len(data.data())):
-new_data.append(map(lambda entry : str(entry) if type(entry) == unicode else entry, data.get(n).data().values()))
+new_data.append(map(lambda entry : str(entry) if isinstance(entry, unicode) else entry, data.get(n).data().values()))
return json_data_node.make({
"headers" : headers,
"data" : new_data
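The `unicode` name these lines still test against does not exist on Python 3, so the `isinstance` swap alone is not enough to make them portable; a later stage typically aliases the name. A hedged compatibility sketch:

```python
import sys

if sys.version_info[0] >= 3:
    unicode = str  # Python 3: all text is str; keep the old name usable

def unicode_to_str(string):
    return str(string) if isinstance(string, unicode) else string

print(unicode_to_str(u"header"))  # 'header' on both interpreters
```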
10 changes: 5 additions & 5 deletions CondCore/Utilities/python/CondDBFW/data_sources.py
@@ -127,9 +127,9 @@ def __init__(self, data=None):
# be created in code that shouldn't be doing it.
@staticmethod
def make(data):
-if type(data) == list:
+if isinstance(data, list):
return json_list(data)
-elif type(data) == dict:
+elif isinstance(data, dict):
return json_dict(data)
else:
return json_basic(data)
@@ -157,12 +157,12 @@ def find(self, type_name):
# traverse json_data_node structure, and find all lists
# if this node in the structure is a list, return all sub lists
lists = []
-if type(self._data) == type_name:
+if isinstance(self._data, type_name):
lists.append(self._data)
-if type(self._data) == list:
+if isinstance(self._data, list):
for item in self._data:
lists += json_data_node.make(item).find(type_name)
-elif type(self._data) == dict:
+elif isinstance(self._data, dict):
for key in self._data:
lists += json_data_node.make(self._data[key]).find(type_name)
return lists
8 changes: 4 additions & 4 deletions CondCore/Utilities/python/CondDBFW/models.py
@@ -48,7 +48,7 @@ def session_independent_object(object, schema=None):
return new_object

def session_independent(objects):
-if type(objects) == list:
+if isinstance(objects, list):
return map(session_independent_object, objects)
else:
# assume objects is a single object (not a list)
@@ -151,17 +151,17 @@ def apply(self):

def apply_filter(orm_query, orm_class, attribute, value):
filter_attribute = getattr(orm_class, attribute)
-if type(value) == list:
+if isinstance(value, list):
orm_query = orm_query.filter(filter_attribute.in_(value))
-elif type(value) == data_sources.json_list:
+elif isinstance(value, data_sources.json_list):
orm_query = orm_query.filter(filter_attribute.in_(value.data()))
elif type(value) in [Range, Radius]:

minus = value.get_start()
plus = value.get_end()
orm_query = orm_query.filter(and_(filter_attribute >= minus, filter_attribute <= plus))

-elif type(value) == RegExp:
+elif isinstance(value, RegExp):

# Relies on being a SingletonThreadPool

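The untouched `elif type(value) in [Range, Radius]:` branch could take the same treatment in a follow-up, since `isinstance` accepts a tuple of classes. A sketch with stand-in classes (not the CondDBFW models):

```python
class Range(object): pass   # stand-in
class Radius(object): pass  # stand-in

value = Range()
print(isinstance(value, (Range, Radius)))  # True, and covers subclasses too
```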
10 changes: 5 additions & 5 deletions CondCore/Utilities/python/CondDBFW/querying.py
@@ -78,7 +78,7 @@ def setup(self):
Setup engine with given credentials from netrc file, and make a session maker.
"""

-if type(self.connection_data) == dict:
+if isinstance(self.connection_data, dict):
self.engine = engine_from_dictionary(self.connection_data, pooling=self._pooling)
else:
# we've been given an engine by the user
@@ -95,7 +95,7 @@ def setup(self):
if self.models[key].__class__ == sqlalchemy.ext.declarative.api.DeclarativeMeta\
and str(self.models[key].__name__) != "Base":

-if type(self.connection_data) == dict:
+if isinstance(self.connection_data, dict):
# we can only extract the secrets and schema individuall
# if we were given a dictionary... if we were given an engine
# we can't do this without parsing the connection string from the engine
@@ -275,7 +275,7 @@ def commit(self):
self.session.rollback()

def write_and_commit(self, object):
-if type(object) == list:
+if isinstance(object, list):
for item in object:
self.write_and_commit(item)
else:
@@ -399,14 +399,14 @@ def new_connection_dictionary(connection_data, secrets=None, mode="r"):
username = str(raw_input("Enter the username you want to connect to the schema '%s' with: " % (schema_name)))
password = str(raw_input("Enter the password for the user '%s' in database '%s': " % (username, database_name)))
else:
-if type(secrets) == str:
+if isinstance(secrets, str):
netrc_key = "%s/%s/%s" % (database_name, schema_name, mode_to_netrc_key_suffix[mode])
netrc_data = _get_netrc_data(secrets, key=netrc_key)
# take the username from the netrc entry corresponding to the mode the database is opened in
# eg, if the user has given mode="read", the database_name/schema_name/read entry will be taken
username = netrc_data["login"]
password = netrc_data["password"]
-elif type(secrets) == dict:
+elif isinstance(secrets, dict):
username = secrets["user"]
password = secrets["password"]
else:
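One more stage-2 item in this hunk: `raw_input` was renamed to `input` on Python 3. A portable sketch of the prompt code above:

```python
try:
    input_func = raw_input  # Python 2
except NameError:
    input_func = input      # Python 3

username = str(input_func("Enter the username: "))
```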