python modernize 2to3 tool fix_filter #23454

Merged
Changes from all commits
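This PR applies the python-modernize / 2to3 "fix_filter" transformation. In Python 2 the filter() builtin returns a list; in Python 3 it returns a lazy, one-shot iterator, so results that are indexed, reused, or passed to len() must be materialized. The two rewrites used throughout the diff are, in a minimal sketch (variable names are illustrative, not from the diff):

    values = ["a", "", "b", ""]

    # Rewrite 1: wrap the call in list() to restore Python 2 list semantics.
    non_empty = list(filter(len, values))            # ['a', 'b']

    # Rewrite 2: the equivalent list comprehension, used where the
    # predicate was a lambda.
    non_empty = [x for x in values if len(x) > 0]    # ['a', 'b']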
@@ -22,6 +22,5 @@ def setCondition(process,
     args["label"] = cms.untracked.string(label)

     process.GlobalTag.toGet \
-        = cms.VPSet(filter(lambda x: x.record.value() != record,
-                           process.GlobalTag.toGet.value()))
+        = cms.VPSet([x for x in process.GlobalTag.toGet.value() if x.record.value() != record])
     process.GlobalTag.toGet.append(cms.PSet(**args))
@@ -43,8 +43,7 @@ def get_ndiscriminator(self, objid):
         for structure in discriminator:
             ndiscriminator[structure].append(getattr(entry, structure))
         for structure in discriminator:
-            ndiscriminator[structure] = filter(lambda x: x != 0,
-                                               ndiscriminator[structure])
+            ndiscriminator[structure] = [x for x in ndiscriminator[structure] if x != 0]

         return [len(set(ndiscriminator[structure]))
                 for structure in discriminator]
4 changes: 2 additions & 2 deletions Alignment/MillePedeAlignmentAlgorithm/scripts/mps_alisetup.py
@@ -156,7 +156,7 @@ def _fetch_external_datasets(self):
         datasets = map(lambda x: x.strip(),
                        self._config.get("general",
                                         "externalDatasets").split(","))
-        datasets = filter(lambda x: len(x.strip()) > 0, datasets)
+        datasets = [x for x in datasets if len(x.strip()) > 0]
         for item in datasets:
             splitted = item.split("|")
             dataset = splitted[0].strip()
@@ -328,7 +328,7 @@ def _create_mille_jobs(self):
                        "cmscafuser:"+self._mss_dir]
         if dataset["numberOfEvents"] > 0:
             command.extend(["--max-events", str(dataset["numberOfEvents"])])
-        command = filter(lambda x: len(x.strip()) > 0, command)
+        command = [x for x in command if len(x.strip()) > 0]

         # Some output:
         print "Creating jobs for dataset:", name
@@ -593,8 +593,7 @@ def _get_track_collection(self, edm_file):
                       "'{}'.".format(track_collections[0]))
             return track_collections[0]
         else:
-            alcareco_tracks = filter(lambda x: x.startswith("ALCARECO"),
-                                     track_collections)
+            alcareco_tracks = [x for x in track_collections if x.startswith("ALCARECO")]
             if len(alcareco_tracks) == 0 and "generalTracks" in track_collections:
                 print_msg("\tDetermined track collection as 'generalTracks'.")
                 return "generalTracks"
6 changes: 3 additions & 3 deletions CalibTracker/SiStripDCS/test/MakeTkMaps.py
@@ -22,7 +22,7 @@ def ProduceTkMapVoltageInputFiles(workdir=os.getcwd()): #Setting the dir by defa
     """
     #Get all the files in the directory workdir (1 file per IOV):
     print "Analysing %s directory"%workdir
-    logfilenames=filter(lambda x: x.startswith("DetVOffReaderDebug"), os.listdir(workdir))
+    logfilenames=[x for x in os.listdir(workdir) if x.startswith("DetVOffReaderDebug")]
     if logfilenames:
         print "Processing %s logfiles..."%len(logfilenames)
     else:
@@ -119,7 +119,7 @@ def CreateTkVoltageMapsCfgs(workdir=os.getcwd()): #Default to current working di
     It returns the list of cfgs ready to be cmsRun to produce the maps
     """
     #Use HV log files to loop... could use also LV logs...
-    HVLogs=filter(lambda x: x.startswith("HV") and "FROM" in x and x.endswith(".log"),os.listdir(workdir))
+    HVLogs=[x for x in os.listdir(workdir) if x.startswith("HV") and "FROM" in x and x.endswith(".log")]

     #Open the file to use as template
     TkMapCreatorTemplateFile=open(os.path.join(os.getenv("CMSSW_BASE"),"src/CalibTracker/SiStripDCS/test","TkVoltageMapCreator_cfg.py"),"r")
@@ -157,7 +157,7 @@ def CreateTkVoltageMaps(workdir=os.getcwd()): #Default to current working direct
     Function that looks for TkVoltageMap*cfg.py in the workdir directory and launches each of them
     creating 2 TkVoltageMaps per IOV, one for LV and one of HV status (each as a png file).
     """
-    TkMapCfgs=filter(lambda x: x.startswith("TkVoltageMap") and "FROM" in x and x.endswith("cfg.py"),os.listdir(workdir))
+    TkMapCfgs=[x for x in os.listdir(workdir) if x.startswith("TkVoltageMap") and "FROM" in x and x.endswith("cfg.py")]
     for TkMapCfg in TkMapCfgs:
         #Make sure we run the cfg in the workdir and also the logfile is saved there...
         TkMapCfg=os.path.join(workdir,TkMapCfg)
6 changes: 2 additions & 4 deletions CondCore/Utilities/python/CondDBFW/uploads.py
@@ -52,9 +52,7 @@ def new_log_file_id():
     """
     # new id = number of log files + 1
     # (primitive - matching the hash of the upload session may be a better idea)
-    log_files = filter(lambda file : "upload_log" in file,
-                        os.listdir(os.path.join(os.getcwd(), "upload_logs"))
-                    )
+    log_files = [file for file in os.listdir(os.path.join(os.getcwd(), "upload_logs")) if "upload_log" in file]
     new_id = len(log_files)+1
     return new_id

@@ -601,7 +599,7 @@ def filter_iovs_by_fcsr(self, upload_session_id):

         # only select iovs that have sinces >= max_since_below_dest
         # and then shift any IOVs left to the destination since
-        self.data_to_send["iovs"] = filter(lambda iov : iov["since"] >= max_since_below_dest, self.data_to_send["iovs"])
+        self.data_to_send["iovs"] = [iov for iov in self.data_to_send["iovs"] if iov["since"] >= max_since_below_dest]
         for (i, iov) in enumerate(self.data_to_send["iovs"]):
             if self.data_to_send["iovs"][i]["since"] < self.data_to_send["since"]:
                 self.data_to_send["iovs"][i]["since"] = self.data_to_send["since"]
16 changes: 4 additions & 12 deletions CondTools/BTau/python/checkBTagCalibrationConsistency.py
@@ -16,7 +16,7 @@

 def _eta_pt_discr_entries_generator(filter_keyfunc, op):
     assert data
-    entries = filter(filter_keyfunc, data.entries)
+    entries = list(filter(filter_keyfunc, data.entries))

     # use full or half eta range?
     if any(e.params.etaMin < 0. for e in entries):
@@ -26,19 +26,11 @@ def _eta_pt_discr_entries_generator(filter_keyfunc, op):

     for eta in eta_test_points:
         for pt in data.pt_test_points:
-            ens_pt_eta = filter(
-                lambda e:
-                    e.params.etaMin < eta < e.params.etaMax and
-                    e.params.ptMin < pt < e.params.ptMax,
-                entries
-            )
+            ens_pt_eta = [e for e in entries if e.params.etaMin < eta < e.params.etaMax and
+                          e.params.ptMin < pt < e.params.ptMax]
             if op == 3:
                 for discr in data.discr_test_points:
-                    ens_pt_eta_discr = filter(
-                        lambda e:
-                            e.params.discrMin < discr < e.params.discrMax,
-                        ens_pt_eta
-                    )
+                    ens_pt_eta_discr = [e for e in ens_pt_eta if e.params.discrMin < discr < e.params.discrMax]
                     yield eta, pt, discr, ens_pt_eta_discr
             else:
                 yield eta, pt, None, ens_pt_eta
2 changes: 1 addition & 1 deletion CondTools/BTau/python/dataLoader.py
@@ -183,7 +183,7 @@ def get_data_csv(csv_data):
         for op in ops
         for fl in flavs
     )
-    lds = filter(lambda d: d.entries, lds)
+    lds = [d for d in lds if d.entries]
     return lds


5 changes: 1 addition & 4 deletions CondTools/BTau/python/generateFlavCfromFlavB.py
@@ -8,10 +8,7 @@


 def generate_flav_c(loaded_data):
-    flav_b_data = filter(
-        lambda e: e.params.jetFlavor == 0,
-        loaded_data.entries
-    )
+    flav_b_data = [e for e in loaded_data.entries if e.params.jetFlavor == 0]
     flav_b_data = sorted(flav_b_data, key=lambda e: e.params.operatingPoint)
     flav_b_data = sorted(flav_b_data, key=lambda e: e.params.measurementType)
     flav_b_data = sorted(flav_b_data, key=lambda e: e.params.etaMin)
2 changes: 1 addition & 1 deletion Configuration/DataProcessing/python/Impl/AlCa.py
@@ -60,7 +60,7 @@ def alcaSkim(self, skims, **args):
         """
         step = ""
         pclWflws = [x for x in skims if "PromptCalibProd" in x]
-        skims = filter(lambda x: x not in pclWflws, skims)
+        skims = [x for x in skims if x not in pclWflws]

         if len(pclWflws):
             step += 'ALCA:'+('+'.join(pclWflws))
2 changes: 1 addition & 1 deletion Configuration/DataProcessing/python/Reco.py
@@ -213,7 +213,7 @@ def alcaSkim(self, skims, **args):

         step = ""
         pclWflws = [x for x in skims if "PromptCalibProd" in x]
-        skims = filter(lambda x: x not in pclWflws, skims)
+        skims = [x for x in skims if x not in pclWflws]

         if len(pclWflws):
             step += 'ALCA:'+('+'.join(pclWflws))
2 changes: 1 addition & 1 deletion Configuration/PyReleaseValidation/python/relval_upgrade.py
@@ -76,7 +76,7 @@ def makeStepName(key,frag,step,suffix):
         # skip ALCA
         trackingVariations = ['trackingOnly','trackingRun2','trackingOnlyRun2','trackingLowPU','pixelTrackingOnly']
         for tv in trackingVariations:
-            stepList[tv] = filter(lambda s : "ALCA" not in s, stepList[tv])
+            stepList[tv] = [s for s in stepList[tv] if "ALCA" not in s]
         workflows[numWF+upgradeSteps['trackingOnly']['offset']] = [ upgradeDatasetFromFragment[frag], stepList['trackingOnly']]
         if '2017' in key:
             for tv in trackingVariations[1:]:
2 changes: 1 addition & 1 deletion DQM/SiPixelPhase1Common/python/SpecificationBuilder_cfi.py
@@ -78,7 +78,7 @@ def __deepcopy__(self, memo):
         return t

     def groupBy(self, cols, mode = "SUM"):
-        cnames = filter(len, val(cols).split("/")) # omit empty items
+        cnames = list(filter(len, val(cols).split("/"))) # omit empty items
         newstate = self._state

         # The behaviour of groupBy depends a lot on when it happens:
2 changes: 1 addition & 1 deletion DQMServices/FileIO/scripts/dqmMemoryStats.py
@@ -93,7 +93,7 @@ def displayDirectoryStatistics(stats, args):
     as_list.sort(reverse=True, key=lambda v_k1: abs(v_k1[0]))

     if cutoff is not None:
-        as_list = filter(lambda v_k: abs(v_k[0]) > cutoff, as_list)
+        as_list = [v_k for v_k in as_list if abs(v_k[0]) > cutoff]

     if display is not None:
         as_list = as_list[:display]
4 changes: 2 additions & 2 deletions FWCore/GuiBrowsers/python/EnablePSetHistory.py
@@ -307,10 +307,10 @@ def new_dumpModifications(self, comments=True, process=True, module=False, seque
     for name, o in self.items_():
         modifications += self.recurseDumpModifications_(name, o)
     if not sequence:
-        modifications = filter(lambda x: not x['type'] == 'seq', modifications)
+        modifications = [x for x in modifications if not x['type'] == 'seq']
     checkpoint = self.__dict__['_Process__modifiedcheckpoint']
     if not checkpoint == None:
-        modifications = filter(lambda x: any([x['name'].startswith(check) for check in checkpoint]), modifications)
+        modifications = [x for x in modifications if any([x['name'].startswith(check) for check in checkpoint])]
     if module:
         value = False
         comments = False
4 changes: 2 additions & 2 deletions FWCore/GuiBrowsers/python/Vispa/Gui/ConnectableWidget.py
@@ -289,15 +289,15 @@ def sinkPorts(self):
         return [port for port in self._ports if port.portType() == "sink"]
         def isSink(port):
             return port.portType() == 'sink'
-        return filter(isSink, self._ports)
+        return list(filter(isSink, self._ports))
Contributor: Do we understand this change? Does filter return a generator in Python 3, while the old filter returned a list?

Contributor Author: So now I understand you consider this a problem; I am not sure what alternatives there are. I'll have a look.

Contributor Author: It seems an easy replacement is

    return [item for item in self._ports if isSink(item)]

I'd propose to handle the 13 such changes (mostly in obsolete code, and all in non-performance-critical code) in a follow-up PR.

Contributor: I concur.


     def sourcePorts(self):
         """ Returns list of all source ports set.
         """
         return [port for port in self._ports if port.portType() == "source"]
         def isSource(port):
             return port.portType() == 'source'
-        return filter(isSource, self._ports)
+        return list(filter(isSource, self._ports))

     def sinkPort(self, name):
         """ Returns sink port with given name or None if no such port is found.
2 changes: 1 addition & 1 deletion HLTriggerOffline/Btag/python/readConfig.py
@@ -18,7 +18,7 @@ def read(self):
         # self.denominatorTriggerPath=ConfigSectionMap("config")["denominatorTriggerPath"]

         files=files.splitlines()
-        self.files=filter(lambda x: len(x)>0,files)
+        self.files=[x for x in files if len(x)>0]

         self.btag_modules=cms.VInputTag()
         self.btag_pathes=cms.vstring()
2 changes: 1 addition & 1 deletion IOMC/RandomEngine/python/RandomServiceHelper.py
@@ -57,7 +57,7 @@ def __psetsWithSeeds(self):

         #print svcAttrs

-        return filter(self.__containsSeed, svcAttrs)
+        return list(filter(self.__containsSeed, svcAttrs))


     def countSeeds(self):
2 changes: 1 addition & 1 deletion L1Trigger/L1TMuonBarrel/test/kalmanTools/createGains.py
@@ -10,7 +10,7 @@ def getBit(q,i):
 def fetchKMTF(event,etaMax=0.83,chi2=800000,dxyCut=100000):
     kmtfH = Handle('vector<L1MuKBMTrack>')
     event.getByLabel('simKBmtfDigis',kmtfH)
-    kmtf=filter(lambda x: abs(x.eta())<etaMax and x.approxChi2()<chi2 and abs(x.dxy())<dxyCut,kmtfH.product())
+    kmtf=[x for x in kmtfH.product() if abs(x.eta())<etaMax and x.approxChi2()<chi2 and abs(x.dxy())<dxyCut]
     return sorted(kmtf,key=lambda x: x.pt(),reverse=True)

 ####Save the Kalman Gains for LUTs
@@ -103,7 +103,7 @@ def process(self, event):
         event.rhoCN = self.handles['rhoCN'].product()[0]
         event.sigma = self.handles['sigma'].product()[0] if self.handles['sigma'].isValid() else -999
         event.vertices = self.handles['vertices'].product()
-        event.goodVertices = filter(self.testGoodVertex,event.vertices)
+        event.goodVertices = list(filter(self.testGoodVertex,event.vertices))


         self.count.inc('All Events')
2 changes: 1 addition & 1 deletion PhysicsTools/Heppy/python/physicsutils/VBF.py
@@ -79,7 +79,7 @@ def isCentral( jet ):
                 return True
             else:
                 return False
-        centralJets = filter( isCentral, otherJets )
+        centralJets = list(filter( isCentral, otherJets ))
         return centralJets

     def calcP4(self, jets):
4 changes: 2 additions & 2 deletions PhysicsTools/Heppy/python/physicsutils/genutils.py
@@ -29,14 +29,14 @@ def allDaughters(particle, daughters, rank ):


 def bosonToX(particles, bosonType, xType):
-    bosons = filter(lambda x: x.status()==3 and x.pdgId()==bosonType, particles)
+    bosons = [x for x in particles if x.status()==3 and x.pdgId()==bosonType]
     daughters = []
     if len(bosons)==0:
         return [], False
     boson = bosons[0]
     daus = []
     allDaughters( boson, daus, 0)
-    xDaus = filter(lambda x: x.status()==3 and abs(x.pdgId())==xType, daus)
+    xDaus = [x for x in daus if x.status()==3 and abs(x.pdgId())==xType]
     # print printOut(xDaus)
     return xDaus, True

4 changes: 2 additions & 2 deletions PhysicsTools/HeppyCore/python/utils/deltar.py
@@ -54,7 +54,7 @@ def matchObjectCollection3 ( objects, matchCollection, deltaRMax = 0.3, filter =

     objectCoords = [ (o.eta(),o.phi(),o) for o in objects ]
     matchdCoords = [ (o.eta(),o.phi(),o) for o in matchCollection ]
-    allPairs = sorted([(deltaR2 (oeta, ophi, meta, mphi), (object, match)) for (oeta,ophi,object) in objectCoords for (meta,mphi,match) in matchdCoords if abs(oeta-meta)<=deltaRMax and filter(object,match) ])
+    allPairs = sorted([(deltaR2 (oeta, ophi, meta, mphi), (object, match)) for (oeta,ophi,object) in objectCoords for (meta,mphi,match) in matchdCoords if abs(oeta-meta)<=deltaRMax and list(filter(object,match)) ])
     #allPairs = [(deltaR2 (object.eta(), object.phi(), match.eta(), match.phi()), (object, match)) for object in objects for match in matchCollection if filter(object,match) ]
     #
     # to flag already matched objects
@@ -155,7 +155,7 @@ def matchObjectCollection( objects, matchCollection, deltaR2Max, filter = lambda
     if len(matchCollection)==0:
         return dict( list(zip(objects, [None]*len(objects))) )
     for object in objects:
-        bm, dr2 = bestMatch( object, [mob for mob in matchCollection if filter(object,mob)] )
+        bm, dr2 = bestMatch( object, [mob for mob in matchCollection if list(filter(object,mob))] )
         if dr2<deltaR2Max:
             pairs[object] = bm
         else:
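Note that in these two deltar.py hunks, filter is a keyword argument (filter = lambda ... in both signatures above), not the builtin: filter(object, match) calls the caller-supplied predicate and returns a boolean, so the mechanical list(...) wrapper raises a TypeError as soon as the predicate call is reached. A hypothetical reduction of the pitfall (names invented for illustration):

    def match_all(objects, candidates, filter=lambda x, y: True):
        # 'filter' shadows the builtin here: filter(o, c) yields True/False,
        # and list(True) raises TypeError: 'bool' object is not iterable.
        return [(o, c) for o in objects for c in candidates
                if list(filter(o, c))]

    match_all([1], [2])  # TypeError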
4 changes: 2 additions & 2 deletions PhysicsTools/RooStatsCms/test/testCrabToyMC_summary.py
@@ -42,13 +42,13 @@

 # retrieve the targz files
 for directory in sys.argv[1:]:
-    for targzfile in filter(lambda name: ".tgz" in name, os.listdir(directory)):
+    for targzfile in [name for name in os.listdir(directory) if ".tgz" in name]:
         targz_files.append(directory+"/"+targzfile)

 # inspect them
 for targzfile in targz_files:
     f = tarfile.open(targzfile,"r:gz")
-    for txtfilename in filter(lambda name: ".txt" in name, f.getnames()):
+    for txtfilename in [name for name in f.getnames() if ".txt" in name]:
         print "Xtracting ",txtfilename
         txtfile = f.extractfile(txtfilename)
         for line in txtfile.readlines():
2 changes: 1 addition & 1 deletion RecoLuminosity/LumiDB/python/checkforupdate.py
@@ -21,7 +21,7 @@ def runningVersion(self,cmsswWorkingBase,scriptname,isverbose=True):
         cleanresult=re.sub(r'\s+|\t+',' ',cleanresult)
         allfields=cleanresult.split(' ')
         workingversion = "n/a"
-        for line in filter(lambda line: "Sticky Tag" in line, result.split('\n')):
+        for line in [line for line in result.split('\n') if "Sticky Tag" in line]:
             workingversion = line.split()[2]
         if workingversion=='(none)':
             workingversion='HEAD'
2 changes: 1 addition & 1 deletion RecoLuminosity/LumiDB/scripts/lumiDBFiller.py
@@ -44,7 +44,7 @@ def getRunsToBeUploaded(connectionString, dropbox, authpath='',minrun=180250):
     # check if there are new runs to be uploaded
     #command = 'ls -ltr '+dropbox
     p=re.compile('^CMS_LUMI_RAW_\d\d\d\d\d\d\d\d_\d\d\d\d\d\d\d\d\d_\d\d\d\d_\d.root$')
-    files=filter(os.path.isfile,[os.path.join(dropbox,x) for x in os.listdir(dropbox) if p.match(x)])
+    files=list(filter(os.path.isfile,[os.path.join(dropbox,x) for x in os.listdir(dropbox) if p.match(x)]))
     files.sort(key=lambda x: os.path.getmtime(os.path.join(dropbox,x)))
     #print 'sorted files ',files
     #print files
2 changes: 1 addition & 1 deletion SimMuon/Configuration/python/customizeMuonDigi.py
@@ -18,7 +18,7 @@
 # - drop unnecessary mixObjects
 def customize_mix_muon_only(process):
     process.mix.digitizers = digitizers = cms.PSet()
-    digi_aliases = filter(lambda n: 'Digi' in n, process.aliases.keys())
+    digi_aliases = [n for n in process.aliases.keys() if 'Digi' in n]
     for a in digi_aliases: process.__delattr__(a)
     from SimGeneral.MixingModule.mixObjects_cfi import theMixObjects
     process.mix.mixObjects = theMixObjects
6 changes: 3 additions & 3 deletions Utilities/RelMon/python/directories2html.py
@@ -285,7 +285,7 @@ def get_comparisons(category,directory):
     tot_counter=1

     # get the right ones
-    comparisons= filter (lambda comp: comp.status == cat_states[category] , directory.comparisons)
+    comparisons= [comp for comp in directory.comparisons if comp.status == cat_states[category]]
     n_comparisons=len(comparisons)

     is_reverse=True
@@ -549,7 +549,7 @@ def get_aggr_pairs_info(dir_dict,the_aggr_pairs=[]):
             present_subdirs[subdirname]={"nsucc":nsucc,"weight":weight}
         # Make it usable also for subdirectories
         for subsubdirname,subsubdir in subdir.get_subdirs_dict().items():
-            for pathname in filter(lambda name:"/" in name,subdir_list):
+            for pathname in [name for name in subdir_list if "/" in name]:
                 selected_subdirname,selected_subsubdirname = pathname.split("/")
                 if selected_subdirname == subdirname and selected_subsubdirname==subsubdirname:
                     #print "Studying directory ",subsubdirname," in directory ",subdirname
@@ -710,7 +710,7 @@ def make_summary_table(indir,aggregation_rules,aggregation_rules_twiki, hashing_


     # Get the list of pickles
-    sample_pkls=filter(lambda name: name.endswith(".pkl"),listdir("./"))
+    sample_pkls=[name for name in listdir("./") if name.endswith(".pkl")]

     # Load directories, build a list of all first level subdirs
     dir_unpicklers=[]
4 changes: 2 additions & 2 deletions Utilities/RelMon/python/dirstructure.py
@@ -112,7 +112,7 @@ def calcStats(self,make_pie=True):
             print " [*] Missing in %s: %s" %(self.filename1, self.different_histograms['file1'])
             print " [*] Missing in %s: %s" %(self.filename2, self.different_histograms['file2'])
         # clean from empty dirs
-        self.subdirs = filter(lambda subdir: not subdir.is_empty(),self.subdirs)
+        self.subdirs = [subdir for subdir in self.subdirs if not subdir.is_empty()]

         for comp in self.comparisons:
             if comp.status == SKIPED: #in case its in black list & skiped
@@ -182,7 +182,7 @@ def print_report(self,indent="",verbose=False):
         self.calcStats(make_pie=False)
         # print small failure report
         if verbose:
-            fail_comps=filter(lambda comp:comp.status==FAIL,self.comparisons)
+            fail_comps=[comp for comp in self.comparisons if comp.status==FAIL]
             fail_comps=sorted(fail_comps,key=lambda comp:comp.name )
             if len(fail_comps)>0:
                 print indent+"* %s/%s:" %(self.mother_dir,self.name)
2 changes: 1 addition & 1 deletion Utilities/RelMon/python/dqm_interfaces.py
@@ -594,7 +594,7 @@ def ls(self,directory_name=""):
         contents={}
         self.different_histograms['file1']= {}
         self.different_histograms['file2']= {}
-        keys = filter(lambda key: key in contents1,contents2.keys()) #set of all possible contents from both files
+        keys = [key for key in contents2.keys() if key in contents1] #set of all possible contents from both files
         #print " ## keys: %s" %(keys)
         for key in keys: #iterate on all unique keys
             if contents1[key]!=contents2[key]: