From 927e11d64d19cfcfcb4e54379d64a13ee359d7fb Mon Sep 17 00:00:00 2001
From: mteroerd
Date: Mon, 19 Nov 2018 17:20:41 +0100
Subject: [PATCH] Added support for condor instead of LSF

---
 .../APEEstimation/macros/DrawIteration.C      | 146 +++---
 .../APEEstimation/plugins/ApeEstimator.cc     |   2 -
 .../APEEstimation/scripts/initialise.bash     |  29 --
 .../test/autoSubmitter/autoSubmitter.py       | 441 +++++++++++++-----
 .../autoSubmitter/autoSubmitterTemplates.py   |  39 +-
 .../test/autoSubmitter/config.ini             |  25 +-
 .../APEEstimation/test/batch/startSkim.py     |   7 +-
 .../test/cfgTemplate/batchSubmitTemplate.tcsh |  27 --
 .../test/cfgTemplate/createStep2.bash         |  94 ----
 .../test/cfgTemplate/startStep1.bash          |  15 -
 .../test/cfgTemplate/startStep2.bash          |  14 -
 .../test/cfgTemplate/summaryTemplate.bash     |  27 --
 .../test/cfgTemplate/writeSubmitScript.sh     |  44 --
 .../test/cfgTemplateData/createStep1.bash     |  64 ---
 .../manual_run10Iterations.bash               |  19 -
 .../manual_run15Iterations.bash               |  19 -
 .../test/cfgTemplateData/run10Iterations.bash |  58 ---
 .../test/cfgTemplateData/run15Iterations.bash |  58 ---
 .../test/cfgTemplateDesign/createStep1.bash   |  54 ---
 .../test/cfgTemplateMc/createStep1.bash       |  52 ---
 .../cfgTemplateMc/manual_run10Iterations.bash |  19 -
 .../cfgTemplateMc/manual_run15Iterations.bash |  19 -
 .../test/cfgTemplateMc/run10Iterations.bash   |  58 ---
 .../test/cfgTemplateMc/run15Iterations.bash   |  58 ---
 .../test/plottingTools/drawIterations.py      |  17 +
 .../test/plottingTools/drawResults.py         |  21 +
 .../test/plottingTools/granularity.py         |  20 +
 .../test/plottingTools/iterationsPlotter.py   | 129 +++++
 .../test/plottingTools/resultPlotter.py       | 186 ++++++++
 .../test/plottingTools/setTDRStyle.py         | 170 +++++++
 .../test/plottingTools/systematicErrors.py    | 116 +++++
 31 files changed, 1102 insertions(+), 945 deletions(-)
 delete mode 100644 Alignment/APEEstimation/test/cfgTemplate/batchSubmitTemplate.tcsh
 delete mode 100644 Alignment/APEEstimation/test/cfgTemplate/createStep2.bash
 delete mode 100644 Alignment/APEEstimation/test/cfgTemplate/startStep1.bash
 delete mode 100644 Alignment/APEEstimation/test/cfgTemplate/startStep2.bash
 delete mode 100644 Alignment/APEEstimation/test/cfgTemplate/summaryTemplate.bash
 delete mode 100644 Alignment/APEEstimation/test/cfgTemplate/writeSubmitScript.sh
 delete mode 100644 Alignment/APEEstimation/test/cfgTemplateData/createStep1.bash
 delete mode 100644 Alignment/APEEstimation/test/cfgTemplateData/manual_run10Iterations.bash
 delete mode 100644 Alignment/APEEstimation/test/cfgTemplateData/manual_run15Iterations.bash
 delete mode 100644 Alignment/APEEstimation/test/cfgTemplateData/run10Iterations.bash
 delete mode 100644 Alignment/APEEstimation/test/cfgTemplateData/run15Iterations.bash
 delete mode 100644 Alignment/APEEstimation/test/cfgTemplateDesign/createStep1.bash
 delete mode 100644 Alignment/APEEstimation/test/cfgTemplateMc/createStep1.bash
 delete mode 100644 Alignment/APEEstimation/test/cfgTemplateMc/manual_run10Iterations.bash
 delete mode 100644 Alignment/APEEstimation/test/cfgTemplateMc/manual_run15Iterations.bash
 delete mode 100644 Alignment/APEEstimation/test/cfgTemplateMc/run10Iterations.bash
 delete mode 100644 Alignment/APEEstimation/test/cfgTemplateMc/run15Iterations.bash
 create mode 100644 Alignment/APEEstimation/test/plottingTools/drawIterations.py
 create mode 100644 Alignment/APEEstimation/test/plottingTools/drawResults.py
 create mode 100644 Alignment/APEEstimation/test/plottingTools/granularity.py
 create mode 100644 Alignment/APEEstimation/test/plottingTools/iterationsPlotter.py
 create mode 100644
Alignment/APEEstimation/test/plottingTools/resultPlotter.py create mode 100644 Alignment/APEEstimation/test/plottingTools/setTDRStyle.py create mode 100644 Alignment/APEEstimation/test/plottingTools/systematicErrors.py diff --git a/Alignment/APEEstimation/macros/DrawIteration.C b/Alignment/APEEstimation/macros/DrawIteration.C index a8c89031de9e8..75ca2065ba1b5 100644 --- a/Alignment/APEEstimation/macros/DrawIteration.C +++ b/Alignment/APEEstimation/macros/DrawIteration.C @@ -679,82 +679,82 @@ void DrawIteration::drawFinals(const std::string& xOrY){ std::vector >::iterator i_hist; unsigned int iHist(1); for(i_hist = v_hist.begin(); i_hist != v_hist.end(); ++i_hist, ++iHist){ - TH1* hist((*i_hist).first); - if(iHist==1){ - hist->Draw("e0"); + TH1* hist((*i_hist).first); + if(iHist==1){ + hist->Draw("e0"); + } + else{ + hist->SetLineColor(iHist); + hist->SetMarkerColor(iHist); + hist->Draw("e0same"); + } } - else{ - hist->SetLineColor(iHist); - hist->SetMarkerColor(iHist); - hist->Draw("e0same"); - } - } TH1* systHist(nullptr); if(systematics_){ - const std::vector& v_name(*i_resultHist); - - bool pixel(false); - bool tob(false); - std::vector::const_iterator i_name; - for(i_name=v_name.begin(); i_name!=v_name.end(); ++i_name){ - const TString name((*i_name).c_str()); - if(name.BeginsWith("Bpix") || name.BeginsWith("Fpix")){ - pixel = true; - break; - } - if(name.BeginsWith("Tob")){ - tob = true; - break; - } - } - if(pixel || tob)systHist = new TH1F("systematics", "sytematics", v_name.size(), 0, v_name.size()); - if(pixel){ - if(xOrY=="x"){ - systHist->SetBinContent(1, 10.); - systHist->SetBinContent(2, 10.); - systHist->SetBinContent(3, 10.); - systHist->SetBinContent(4, 10.); - systHist->SetBinContent(5, 10.); - systHist->SetBinContent(6, 10.); - systHist->SetBinContent(7, 10.); - systHist->SetBinContent(8, 10.); - systHist->SetBinContent(9, 10.); - systHist->SetBinContent(10, 10.); - systHist->SetBinContent(13, 5.); - } - else if(xOrY=="y"){ - systHist->SetBinContent(1, 15.); - systHist->SetBinContent(2, 15.); - systHist->SetBinContent(3, 15.); - systHist->SetBinContent(4, 20.); - systHist->SetBinContent(5, 15.); - systHist->SetBinContent(6, 15.); - systHist->SetBinContent(7, 15.); - systHist->SetBinContent(8, 15.); - systHist->SetBinContent(9, 15.); - systHist->SetBinContent(10, 15.); - systHist->SetBinContent(13, 5.); - } - } - if(tob){ - systHist->SetBinContent(1, 15.); - systHist->SetBinContent(2, 15.); - systHist->SetBinContent(3, 10.); - systHist->SetBinContent(4, 10.); - systHist->SetBinContent(5, 10.); - systHist->SetBinContent(6, 10.); - systHist->SetBinContent(7, 15.); - systHist->SetBinContent(8, 10.); - } + const std::vector& v_name(*i_resultHist); + + bool pixel(false); + bool tob(false); + std::vector::const_iterator i_name; + for(i_name=v_name.begin(); i_name!=v_name.end(); ++i_name){ + const TString name((*i_name).c_str()); + if(name.BeginsWith("Bpix") || name.BeginsWith("Fpix")){ + pixel = true; + break; + } + if(name.BeginsWith("Tob")){ + tob = true; + break; + } + } + if(pixel || tob)systHist = new TH1F("systematics", "sytematics", v_name.size(), 0, v_name.size()); + if(pixel){ + if(xOrY=="x"){ + systHist->SetBinContent(1, 10.); + systHist->SetBinContent(2, 10.); + systHist->SetBinContent(3, 10.); + systHist->SetBinContent(4, 10.); + systHist->SetBinContent(5, 10.); + systHist->SetBinContent(6, 10.); + systHist->SetBinContent(7, 10.); + systHist->SetBinContent(8, 10.); + systHist->SetBinContent(9, 10.); + systHist->SetBinContent(10, 10.); + systHist->SetBinContent(13, 
5.); + } + else if(xOrY=="y"){ + systHist->SetBinContent(1, 15.); + systHist->SetBinContent(2, 15.); + systHist->SetBinContent(3, 15.); + systHist->SetBinContent(4, 20.); + systHist->SetBinContent(5, 15.); + systHist->SetBinContent(6, 15.); + systHist->SetBinContent(7, 15.); + systHist->SetBinContent(8, 15.); + systHist->SetBinContent(9, 15.); + systHist->SetBinContent(10, 15.); + systHist->SetBinContent(13, 5.); + } + } + if(tob){ + systHist->SetBinContent(1, 15.); + systHist->SetBinContent(2, 15.); + systHist->SetBinContent(3, 10.); + systHist->SetBinContent(4, 10.); + systHist->SetBinContent(5, 10.); + systHist->SetBinContent(6, 10.); + systHist->SetBinContent(7, 15.); + systHist->SetBinContent(8, 10.); + } } if(systHist){ systHist->SetFillColor(1); - systHist->SetFillStyle(3004); - systHist->Draw("same"); + systHist->SetFillStyle(3004); + systHist->Draw("same"); } - + canvas->Modified(); canvas->Update(); @@ -766,13 +766,13 @@ void DrawIteration::drawFinals(const std::string& xOrY){ legend->SetMargin(0.30); legend->SetBorderSize(0); - if(v_hist.size()>1){ + if(v_hist.size()>0){ for(i_hist = v_hist.begin(), iHist = 1; i_hist != v_hist.end(); ++i_hist, ++iHist){ - legend->AddEntry((*i_hist).first, (*i_hist).second, "lp"); - } - legend->Draw("same"); + legend->AddEntry((*i_hist).first, (*i_hist).second, "lp"); + } + legend->Draw("same"); } - + canvas->Modified(); canvas->Update(); @@ -782,7 +782,7 @@ void DrawIteration::drawFinals(const std::string& xOrY){ cmsText->SetNDC(); cmsText->Draw("same"); } - + canvas->Modified(); canvas->Update(); diff --git a/Alignment/APEEstimation/plugins/ApeEstimator.cc b/Alignment/APEEstimation/plugins/ApeEstimator.cc index bc09552bc4892..732b1063922e0 100644 --- a/Alignment/APEEstimation/plugins/ApeEstimator.cc +++ b/Alignment/APEEstimation/plugins/ApeEstimator.cc @@ -304,7 +304,6 @@ ApeEstimator::sectorBuilder(){ unsigned int sectorCounter(0); std::vector v_sectorDef(parameterSet_.getParameter >("Sectors")); edm::LogInfo("SectorBuilder")<<"There are "<::const_iterator i_parSet; for(auto const & parSet : v_sectorDef){ ++sectorCounter; const std::string& sectorName(parSet.getParameter("name")); @@ -994,7 +993,6 @@ ApeEstimator::fillTrackVariables(const reco::Track& track, const Trajectory& tra const std::vector& v_meas = traj.measurements(); int count2D(0); float meanPhiSensToNorm(0.F); - std::vector::const_iterator i_meas; for(auto const & i_meas : v_meas){ const TrajectoryMeasurement& meas = i_meas; const TransientTrackingRecHit& hit = *meas.recHit(); diff --git a/Alignment/APEEstimation/scripts/initialise.bash b/Alignment/APEEstimation/scripts/initialise.bash index ecec4628f685b..e8db40b0b7204 100644 --- a/Alignment/APEEstimation/scripts/initialise.bash +++ b/Alignment/APEEstimation/scripts/initialise.bash @@ -2,42 +2,13 @@ DIRBASE="$CMSSW_BASE/src/Alignment/APEEstimation" - mkdir $CMSSW_BASE/src/Alignment/TrackerAlignment/hists/ - mkdir $DIRBASE/hists/ mkdir $DIRBASE/hists/workingArea/ mkdir $DIRBASE/hists/workingArea/apeObjects/ mkdir $DIRBASE/test/batch/workingArea/ mkdir $DIRBASE/test/autoSubmitter/workingArea/ -mkdir $DIRBASE/test/cfgTemplateDesign/workingArea/ -mkdir $DIRBASE/test/cfgTemplateMc/workingArea/ -mkdir $DIRBASE/test/cfgTemplateData/workingArea/ -#mkdir $DIRBASE/test/cfgTemplateParticleGun/workingArea/ - - - -cp $DIRBASE/test/cfgTemplate/createStep2.bash $DIRBASE/test/cfgTemplateDesign/createStep2.bash -cp $DIRBASE/test/cfgTemplate/startStep1.bash $DIRBASE/test/cfgTemplateDesign/startStep1.bash -cp 
$DIRBASE/test/cfgTemplate/startStep2.bash $DIRBASE/test/cfgTemplateDesign/startStep2.bash - - -cp $DIRBASE/test/cfgTemplate/createStep2.bash $DIRBASE/test/cfgTemplateMc/createStep2.bash -cp $DIRBASE/test/cfgTemplate/startStep1.bash $DIRBASE/test/cfgTemplateMc/startStep1.bash -cp $DIRBASE/test/cfgTemplate/startStep2.bash $DIRBASE/test/cfgTemplateMc/startStep2.bash - - -cp $DIRBASE/test/cfgTemplate/createStep2.bash $DIRBASE/test/cfgTemplateData/createStep2.bash -cp $DIRBASE/test/cfgTemplate/startStep1.bash $DIRBASE/test/cfgTemplateData/startStep1.bash -cp $DIRBASE/test/cfgTemplate/startStep2.bash $DIRBASE/test/cfgTemplateData/startStep2.bash - - -#cp $DIRBASE/test/cfgTemplate/createStep2.bash $DIRBASE/test/cfgTemplateParticleGun/createStep2.bash -#cp $DIRBASE/test/cfgTemplate/startStep1.bash $DIRBASE/test/cfgTemplateParticleGun/startStep1.bash -#cp $DIRBASE/test/cfgTemplate/startStep2.bash $DIRBASE/test/cfgTemplateParticleGun/startStep2.bash - - ## INFO: To run TrackListGenerator on AOD, need to comment in ## /Alignment/CommonAlignmentProducer/plugins/AlignmentTrackSelectorModule.cc diff --git a/Alignment/APEEstimation/test/autoSubmitter/autoSubmitter.py b/Alignment/APEEstimation/test/autoSubmitter/autoSubmitter.py index db4ba3fceb338..b1551821edc08 100644 --- a/Alignment/APEEstimation/test/autoSubmitter/autoSubmitter.py +++ b/Alignment/APEEstimation/test/autoSubmitter/autoSubmitter.py @@ -1,17 +1,33 @@ from __future__ import print_function import ConfigParser import argparse -import pickle +import shelve import sys import os import subprocess import shutil import time +import re +sys.path.append("../plottingTools") - -pickle_name = "dump.pkl" # contains all the measurement objects in a list +shelve_name = "dump.shelve" # contains all the measurement objects and plot objects history_file = "history.log" clock_interval = 20 # in seconds +delete_logs_after_finish = True # if it is not desired to keep the log and submit script files + +# regex matching on key, replacement of groups on value +# implement any other shortcuts that you want to use +shortcuts = {} +#sources +shortcuts["mp([0-9]*)"] = "sqlite_file:/afs/cern.ch/cms/CAF/CMSALCA/ALCA_TRACKERALIGN/MP/MPproduction/mp{0}/jobData/jobm/alignments_MP.db" +shortcuts["mp([0-9]*)_jobm([0-9]*)"] = "sqlite_file:/afs/cern.ch/cms/CAF/CMSALCA/ALCA_TRACKERALIGN/MP/MPproduction/mp{0}/jobData/jobm{1}/alignments_MP.db" +shortcuts["sm([0-9]*)_iter([0-9]*)"] = "sqlite_file:/afs/cern.ch/cms/CAF/CMSALCA/ALCA_TRACKERALIGN2/HipPy/alignments/sm{0}/alignments_iter{1}.db" +shortcuts["hp([0-9]*)_iter([0-9]*)"] = "sqlite_file:/afs/cern.ch/cms/CAF/CMSALCA/ALCA_TRACKERALIGN2/HipPy/alignments/hp{0}/alignments_iter{1}.db" +shortcuts["prod"] = "frontier://FrontierProd/CMS_CONDITIONS" + + +# Exact numbers don't really matter, but it is important that each one has a unique +# number, so that states are distinguishable STATE_NONE = -1 STATE_ITERATION_START=0 STATE_BJOBS_WAITING=1 @@ -84,10 +100,31 @@ def replaceAllRanges(string): return [string,] +def replaceShortcuts(toScan): + global shortcuts + for key, value in shortcuts.items(): + match = re.search(key, toScan) + if match and match.group(0) == toScan: + return value.format(*match.groups()) + # no match + return toScan + +def loadConditions(dictionary): + hasAlignmentCondition = False + conditions = [] + for key, value in dictionary.items(): + if key.startswith("condition"): + record = key.split(" ")[1] + connect, tag = value.split(" ") + if record == "TrackerAlignmentRcd": + hasAlignmentCondition = True + 
conditions.append({"record":record, "connect":replaceShortcuts(connect), "tag":tag}) + return conditions, hasAlignmentCondition -def save(measurements): - with open(pickle_name, "w") as saveFile: - pickle.dump(measurements, saveFile) +def save(name, object): + sh = shelve.open(shelve_name) + sh[name] = object + sh.close() class Dataset: name = "" @@ -112,19 +149,15 @@ def __init__(self, config, name): self.nFiles = len(self.fileList) if dsDict.has_key("maxEvents"): - self.maxEvents = dsDict["maxEvents"] + self.maxEvents = int(dsDict["maxEvents"]) if dsDict.has_key("isMC"): if dsDict["isMC"] == "True": self.sampleType = "MC" else: self.sampleType ="data1" - - self.conditions = [] - for key, value in dsDict.items(): - if key.startswith("condition"): - record = key.split(" ")[1] - connect, tag = value.split(" ") - self.conditions.append({"record":record, "connect":connect, "tag":tag}) + + self.conditions, dummy = loadConditions(dsDict) + class Alignment: name = "" @@ -147,15 +180,8 @@ def __init__(self, config, name): if alDict.has_key("isDesign"): self.isDesign= (alDict["isDesign"] == "True") - self.hasAlignmentCondition = False # If this is true, no other Alignment-Object is loaded in apeEstimator_cfg.py using the alignmentName - self.conditions = [] - for key, value in alDict.items(): - if key.startswith("condition"): - record = key.split(" ")[1] - connect, tag = value.split(" ") - if record == "TrackerAlignmentRcd": - self.hasAlignmentCondition = True - self.conditions.append({"record":record, "connect":connect, "tag":tag}) + # If self.hasAlignmentCondition is true, no other Alignment-Object is loaded in apeEstimator_cfg.py using the alignmentName + self.conditions, self.hasAlignmentCondition = loadConditions(alDict) # check if at least one of the two ways to define the alignment was used if self.alignmentName == None and not self.hasAlignmentCondition: @@ -176,7 +202,7 @@ class ApeMeasurement: startTime = "" finishTime = "" - def __init__(self, name, dataset, alignment, additionalOptions={}): + def __init__(self, name, dataset, alignment, config, additionalOptions={}): self.name = name self.alignment = alignment self.dataset = dataset @@ -186,12 +212,19 @@ def __init__(self, name, dataset, alignment, additionalOptions={}): self.failedJobs = [] self.startTime = subprocess.check_output(["date"]).strip() + self.maxEvents = self.dataset.maxEvents + # standards for result plot + self.resultPlotTitle="" + self.resultPlotLabel=self.name + self.resultPlotDo=False + self.resultPlotOutPath = '{}/hists/{}/'.format(base, self.name) + for key, value in additionalOptions.items(): setattr(self, key, value) - print(key, value) self.firstIteration=int(self.firstIteration) self.maxIterations=int(self.maxIterations) self.curIteration = self.firstIteration + self.maxEvents = int(self.maxEvents) if self.alignment.isDesign: self.maxIterations = 0 @@ -202,12 +235,21 @@ def __init__(self, name, dataset, alignment, additionalOptions={}): if not self.alignment.isDesign: ensurePathExists('{}/hists/{}/apeObjects'.format(base, self.name)) + if self.resultPlotDo == "True": + self.resultPlotTitle = self.resultPlotTitle.replace("~", " ") + # Adds new section to config file so it is read in the next step + sectionName = "resultplot:{}".format(self.name) + config.add_section(sectionName) + config.set(sectionName, "wait {}".format(self.resultPlotLabel), "{} {}".format(self.name, min(14, self.maxIterations-1))) + config.set(sectionName, "title", self.resultPlotTitle) + config.set(sectionName, "outPath", 
self.resultPlotOutPath) + def get_status(self): return status_map[self.status] def print_status(self): print("APE Measurement {} in iteration {} is now in status {}".format(self.name, self.curIteration, self.get_status())) - + def submit_jobs(self): toSubmit = [] @@ -216,6 +258,7 @@ def submit_jobs(self): # will be kept in case of overlap, which is the same as if there was no overlap removal # If conditions are made, create file to load them from + rawFileName = "None" conditionsFileName = "None" if len(allConditions) > 0: conditionsFileName = "{base}/python/conditions/conditions_{name}_iter{iterNo}_cff.py".format(base=base,name=self.name, iterNo=self.curIteration) @@ -234,72 +277,100 @@ def submit_jobs(self): lastIter = (self.curIteration==self.maxIterations) and not self.alignment.isDesign + inputCommands = "sample={sample} fileNumber={fileNo} iterNumber={iterNo} lastIter={lastIter} alignRcd={alignRcd} maxEvents={maxEvents} globalTag={globalTag} measurementName={name} conditions={conditions}".format(sample=self.dataset.sampleType,fileNo="$1",iterNo=self.curIteration,lastIter=lastIter,alignRcd=alignmentNameToUse, maxEvents=self.maxEvents, globalTag=self.alignment.globalTag, name=self.name, conditions=rawFileName) + + from autoSubmitterTemplates import condorJobTemplate + jobFileContent = condorJobTemplate.format(base=base, inputFile="$2", inputCommands=inputCommands) + jobFileName = "{}/test/autoSubmitter/workingArea/batchscript_{}_iter{}.tcsh".format(base, self.name,self.curIteration) + with open(jobFileName, "w") as jobFile: + jobFile.write(jobFileContent) + # create a batch job file for each input file + arguments = "" + from autoSubmitterTemplates import condorArgumentTemplate for i in range(self.dataset.nFiles): inputFile = self.dataset.fileList[i] - inputCommands = "sample={sample} fileNumber={fileNo} iterNumber={iterNo} lastIter={lastIter} alignRcd={alignRcd} maxEvents={maxEvents} globalTag={globalTag} measurementName={name} conditions={conditions}".format(sample=self.dataset.sampleType,fileNo=i+1,iterNo=self.curIteration,lastIter=lastIter,alignRcd=alignmentNameToUse, maxEvents=self.dataset.maxEvents, globalTag=self.alignment.globalTag, name=self.name, conditions=rawFileName) - fiName = "{}/test/autoSubmitter/workingArea/batchscript_{}_iter{}_{}".format(base, self.name,self.curIteration,i+1) - with open(fiName+".tcsh", "w") as jobFile: - from autoSubmitterTemplates import bjobTemplate - jobFile.write(bjobTemplate.format(inputFile = inputFile, inputCommands=inputCommands)) - toSubmit.append((fiName,i+1)) - - # submit all batch jobs - submitName = "{}/test/autoSubmitter/workingArea/submit_{}_jobs_iter{}.sh".format(base, self.name, self.curIteration) - with open(submitName,"w") as submitFile: - for sub,number in toSubmit: - from autoSubmitterTemplates import submitJobTemplate - errorFile = sub+"_error.txt" - outputFile = sub+"_output.txt" - jobFile = sub+".tcsh" - date = subprocess.check_output(["date", "+%m_%d_%H_%M_%S"]).strip() - jobName = sub.split("/")[-1]+"_"+date - self.runningJobs.append((jobName, number)) - submitFile.write(submitJobTemplate.format(errorFile=errorFile, outputFile=outputFile, jobFile=jobFile, jobName=jobName)) - submitFile.write("rm -- $0") - - subOut = subprocess.check_output("bash {}".format(submitName), shell=True).strip() + fileNumber = i+1 + arguments += condorArgumentTemplate.format(fileNumber=fileNumber, inputFile=inputFile) + + # build condor submit script + date = subprocess.check_output(["date", "+%m_%d_%H_%M_%S"]).strip() + sub = 
"{}/test/autoSubmitter/workingArea/job_{}_iter{}".format(base, self.name, self.curIteration) + + errorFileTemp = sub+"_error_{}.txt" + errorFile = errorFileTemp.format("$(ProcId)") + outputFile = sub+"_output_$(ProcId).txt" + logFileTemp= sub+"_condor_{}.log" + logFile = logFileTemp.format("$(ProcId)") + jobFile = sub+".tcsh" + jobName = "{}_{}".format(self.name, self.curIteration) + for i in range(self.dataset.nFiles): + # make file empty if it existed before + with open(logFileTemp.format(i), "w") as fi: + pass + + # create submit file + from autoSubmitterTemplates import condorSubTemplate + submitFileContent = condorSubTemplate.format(jobFile=jobFileName, outputFile=outputFile, errorFile=errorFile, logFile=logFile, arguments=arguments, jobName=jobName) + submitFileName = "{}/test/autoSubmitter/workingArea/submit_{}_jobs_iter{}.sub".format(base, self.name, self.curIteration) + with open(submitFileName, "w") as submitFile: + submitFile.write(submitFileContent) + + # submit batch + from autoSubmitterTemplates import submitCondorTemplate + subOut = subprocess.check_output(submitCondorTemplate.format(subFile=submitFileName), shell=True).strip() + if len(subOut) == 0: print("Running on environment that does not know bsub command or ssh session is timed out (ongoing for longer than 24h?), exiting") sys.exit() + + cluster = subOut.split(" ")[-1][:-1] + for i in range(self.dataset.nFiles): + # list contains condor log files from which to read when job is terminated + self.runningJobs.append((logFileTemp.format(i), errorFileTemp.format(i), "{}.{}".format(cluster, i))) + + self.status = STATE_BJOBS_WAITING self.print_status() def check_jobs(self): + # Job was aborted by the user. lastStatus = self.status stillRunningJobs = [] # check all still running jobs - for job, number in self.runningJobs: - from autoSubmitterTemplates import checkJobTemplate - checkString = checkJobTemplate.format(jobName=job) - jobState = subprocess.check_output(checkString, shell=True).rstrip() - if "DONE" in jobState: - # Catch Exceptions that do not influence the job state but make the measurement fail anyway - errFile = "{}/test/autoSubmitter/workingArea/batchscript_{}_iter{}_{}_error.txt".format(base, self.name,self.curIteration,number) - foundErr = False - with open(errFile, "r") as err: - for line in err: - if "Fatal Exception" in line.strip(): - foundErr = True - break - if foundErr: - print("Job {} in iteration {} of APE measurement {} has failed".format(job, self.curIteration, self.name)) - self.failedJobs.append(job) + for logName, errName, jobId in self.runningJobs: + if not os.path.isfile(logName): + print("{} does not exist even though it should, marking job as failed".format(logName)) + self.failedJobs.append( (logName, errName) ) + with open(logName, "r") as logFile: + log = logFile.read() + if not "submitted" in log: + print("{} was apparently not submitted, did you empty the log file or is condor not working?".format(jobId)) + self.failedJobs.append( (logName, errName) ) + + if "Job was aborted" in log: + print("Job {} of measurement {} in iteration {} was aborted".format(jobId, self.name, self.curIteration)) + self.failedJobs.append( (logName, errName) ) + elif "Job terminated" in log: + if "Normal termination (return value 0)" in log: + foundErr = False + with open(errName, "r") as err: + for line in err: + if "Fatal Exception" in line.strip(): + foundErr = True + break + if not foundErr: + print("Job {} of measurement {} in iteration {} finished successfully".format(jobId, self.name, self.curIteration)) 
+ else: + # Fatal error in stderr + print("Job {} of measurement {} in iteration {} has a fatal error".format(jobId, self.name, self.curIteration)) + self.failedJobs.append( (logName, errName) ) else: - print("Job {} in iteration {} of APE measurement {} has just finished".format(job, self.curIteration, self.name)) - elif "EXIT" in jobState: - print("Job {} in iteration {} of APE measurement {} has failed".format(job, self.curIteration, self.name)) - self.failedJobs.append(job) - elif "RUN" in jobState or "PEND" in jobState: - stillRunningJobs.append((job, number)) - elif "Job <{}> is not found".format(job) in jobState: - print("Job {} of APE measurement was not found in queue, so it is assumed that it successfully finished long ago.".format(job, self.name)) - elif len(jobState) == 0: - print("Running on environment that does not know bjobs command or ssh session is timed out (ongoing for longer than 24h?), exiting") - sys.exit() + # nonzero return value + print("Job {} of measurement {} in iteration {} failed".format(jobId, self.name, self.curIteration)) + self.failedJobs.append( (logName, errName) ) else: - print("Unknown state {}, marking job {} of APE measurement {} as failed".format(jobState, job, self.name)) - self.failedJobs.append(job) + stillRunningJobs.append( (logName, errName, jobId) ) self.runningJobs = stillRunningJobs # at least one job failed @@ -308,7 +379,24 @@ def check_jobs(self): self.finishTime = subprocess.check_output(["date"]).strip() elif len(self.runningJobs) == 0: self.status = STATE_BJOBS_DONE - print("All batch jobs of APE measurement {} in iteration {} are done".format(self.name, self.curIteration)) + print("All condor jobs of APE measurement {} in iteration {} are done".format(self.name, self.curIteration)) + + # remove files + if delete_logs_after_finish: + submitFile = "{}/test/autoSubmitter/workingArea/submit_{}_jobs_iter{}.sub".format(base, self.name, self.curIteration) + jobFile = "{}/test/autoSubmitter/workingArea/batchscript_{}_iter{}.tcsh".format(base, self.name,self.curIteration) + os.remove(submitFile) + os.remove(jobFile) + + for i in range(self.dataset.nFiles): + sub = "{}/test/autoSubmitter/workingArea/job_{}_iter{}".format(base, self.name, self.curIteration) + errorFile = sub+"_error_{}.txt".format(i) + outputFile = sub+"_output_{}.txt".format(i) + logFile = sub+"_condor_{}.log".format(i) + os.remove(errorFile) + os.remove(outputFile) + os.remove(logFile) + if lastStatus != self.status: self.print_status() @@ -386,8 +474,8 @@ def finish_iteration(self): def kill(self): from autoSubmitterTemplates import killJobTemplate - for job, number in self.runningJobs: - subprocess.call(killJobTemplate.format(jobName=job), shell=True) + for log, err, jobId in self.runningJobs: + subprocess.call(killJobTemplate.format(jobId=jobId), shell=True) self.runningJobs = [] self.status = STATE_NONE @@ -397,6 +485,59 @@ def purge(self): shutil.rmtree(folderName) # remove log-files as well? 
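The check_jobs() hunk above replaces LSF's bjobs polling with a pure parse of each HTCondor job event log plus the cmsRun stderr file. Below is a minimal standalone sketch of that classification logic, assuming plain condor event logs; the helper name classify_condor_job and its return strings are illustrative and not part of the patch itself.

import os

# Outcomes mirroring the states used in check_jobs() above
RUNNING, DONE, FAILED = "running", "done", "failed"

def classify_condor_job(logPath, errPath):
    # Illustrative helper; the patch inlines this logic in ApeMeasurement.check_jobs()
    if not os.path.isfile(logPath):
        return FAILED   # log file missing, treated as failed
    with open(logPath, "r") as logFile:
        log = logFile.read()
    if "submitted" not in log:
        return FAILED   # never reached the scheduler, or the log was emptied
    if "Job was aborted" in log:
        return FAILED   # e.g. removed with condor_rm
    if "Job terminated" not in log:
        return RUNNING  # still idle or running
    if "Normal termination (return value 0)" not in log:
        return FAILED   # nonzero return value
    # return value 0, but cmsRun may still have thrown a Fatal Exception
    with open(errPath, "r") as err:
        for line in err:
            if "Fatal Exception" in line.strip():
                return FAILED
    return DONE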
+ +class ResultPlot: + def __init__(self, config, name): + rpDict = dict(config.items(name)) + self.waitingFor = [] + self.loadingFrom = [] + self.making = [] + self.name = name.split("resultplot:")[1] + self.outPath = "{}/hists/{}/".format(base,self.name) + self.title = "" + self.granularity = "standardGranularity" + + for key, value in rpDict.items(): + if key.startswith("wait "): + label = key.split(" ")[1] + label = label.replace("~", " ") + if len(value.split(" ")) > 1: + name, iteration = value.split(" ") + else: + name, iteration = value, "14" + self.waitingFor.append((name, iteration, label)) + elif key.startswith("load "): + label = key.split(" ")[1] + label = label.replace("~", " ") + self.loadingFrom.append((value,label)) + else: + setattr(self, key, value) + + def check_finished(self, finished_measurements): + for waiting in self.waitingFor: + if not waiting[0] in finished_measurements.keys(): + return False + return True + + def do_plot(self): + import sys + from resultPlotter import ResultPlotter + import granularity + + plotter = ResultPlotter() + plotter.setOutputPath(self.outPath) + plotter.setTitle(self.title) + plotter.setGranularity(getattr(granularity, self.granularity)) + + for path, label in self.loadingFrom: + plotter.addInputFile(label, path, label) + for name, iteration, label in self.waitingFor: + path = '{}/hists/{}/iter{}/allData_iterationApe.root'.format(base, name, iteration) + plotter.addInputFile(name, path, label) + ensurePathExists(self.outPath) + plotter.draw() + + def main(): parser = argparse.ArgumentParser(description="Automatically run APE measurements") parser.add_argument("-c", "--config", action="append", dest="configs", default=[], @@ -406,18 +547,18 @@ def main(): parser.add_argument("-p", "--purge", action="append", dest="purge", default=[], help="List of measurement names to purge (=kill and remove folder)") parser.add_argument("-r", "--resume", action="append", dest="resume", default=[], - help="Resume interrupted APE measurements which are stored in pickle files") - parser.add_argument("-d", "--dump", action="append", dest="dump", default=[], - help='Specify in which .pkl file to store the measurements') + help="Resume interrupted APE measurements which are stored in shelves (specify shelves)") + parser.add_argument("-d", "--dump", action="store", dest="dump", default=None, + help='Specify in which .shelve file to store the measurements and plots') args = parser.parse_args() global base global clock_interval - global pickle_name + global shelve_name - if args.dump != []: # choose different file than default - pickle_name = args.dump[0] + if args.dump != None: # choose different file than default + shelve_name = args.dump try: base = os.environ['CMSSW_BASE']+"/src/Alignment/APEEstimation" @@ -425,27 +566,41 @@ def main(): print("No CMSSW environment was set, exiting") sys.exit() + killTargets = [] + purgeTargets = [] + for toConvert in args.kill: + killTargets += replaceAllRanges(toConvert) + + for toConvert in args.purge: + purgeTargets += replaceAllRanges(toConvert) measurements = [] + finished_measurements = {} + resultPlots = [] if args.resume != []: for resumeFile in args.resume: try: - with open(resumeFile, "r") as saveFile: - resumed = pickle.load(saveFile) - for res in resumed: - measurements.append(res) - print("Measurement {} in state {} in iteration {} was resumed".format(res.name, res.get_status(), res.curIteration)) - # Killing and purging is done here, because it doesn't make - # sense to kill or purge a measurement that was 
just started - for to_kill in args.kill: - if res.name == to_kill: - res.kill() - for to_purge in args.purge: - if res.name == to_purge: - res.purge() + sh = shelve.open(resumeFile) + resumed = sh["measurements"] + resumed_plots = sh["resultPlots"] + sh.close() + for res in resumed: + measurements.append(res) + print("Measurement {} in state {} in iteration {} was resumed".format(res.name, res.get_status(), res.curIteration)) + # Killing and purging is done here, because it doesn't make + # sense to kill or purge a measurement that was just started + for to_kill in args.kill: + if res.name == to_kill: + res.kill() + for to_purge in args.purge: + if res.name == to_purge: + res.purge() + for res in resumed_plots: + resultPlots.append(res) + print("Result plot {} was resumed".format(res.name)) except IOError: - print("Could not resume because {} could not be opened, exiting".format(pickle_name)) + print("Could not resume because {} could not be opened, exiting".format(shelve_name)) sys.exit() # read out from config file @@ -477,79 +632,111 @@ def main(): value = setting.split("=")[1] additionalOptions[key] = value - measurement = ApeMeasurement(name, dataset, alignment, additionalOptions) + measurement = ApeMeasurement(name, dataset, alignment, config, additionalOptions) measurements.append(measurement) print("APE Measurement {} was started".format(measurement.name)) - + + for name in config.sections(): + if name.startswith("resultplot:"): + if not name.split("resultplot:")[1] in map(lambda x: x.name ,resultPlots): + resultPlots.append(ResultPlot(config, name)) + print("Result plot {} was queued".format(name)) while True: - measurements = [measurement for measurement in measurements if not measurement.status==STATE_NONE] - save(measurements) + # remove finished and failed measurements + measurements = [measurement for measurement in measurements if not (measurement.status==STATE_NONE or measurement.status == STATE_FINISHED)] + save("measurements", measurements) + save("resultPlots", resultPlots) - if len(measurements) == 0: - print("No APE measurements are active, exiting") - break for measurement in measurements: if measurement.status == STATE_ITERATION_START: # start bjobs print("APE Measurement {} just started iteration {}".format(measurement.name, measurement.curIteration)) measurement.submit_jobs() - save(measurements) + save("measurements", measurements) continue # no reason to immediately check jobs if measurement.status == STATE_BJOBS_WAITING: # check if bjobs are finished measurement.check_jobs() - save(measurements) - #~ if measurement.status == STATE_BJOBS_DONE: - #~ continue # give time for files to be closed and delivered back from the batch jobs + save("measurements", measurements) if measurement.status == STATE_BJOBS_DONE: # merge files measurement.do_merge() - save(measurements) + save("measurements", measurements) if measurement.status == STATE_MERGE_DONE: # start summary measurement.do_summary() - save(measurements) + save("measurements", measurements) if measurement.status == STATE_SUMMARY_DONE: # start local setting (only if not a baseline measurement) if measurement.alignment.isDesign: measurement.status = STATE_LOCAL_DONE else: measurement.do_local_setting() - save(measurements) + save("measurements", measurements) if measurement.status == STATE_LOCAL_DONE: measurement.finish_iteration() - save(measurements) + save("measurements", measurements) # go to next iteration or finish measurement if measurement.status == STATE_BJOBS_FAILED or \ measurement.status == 
STATE_MERGE_FAILED or \ measurement.status == STATE_SUMMARY_FAILED or \ measurement.status == STATE_LOCAL_FAILED or \ measurement.status == STATE_FINISHED: - # might want to do something different. for now, this is the solution with open(history_file, "a") as fi: fi.write("APE measurement {name} which was started at {start} finished at {end} with state {state} in iteration {iteration}\n".format(name=measurement.name, start=measurement.startTime, end=measurement.finishTime, state=measurement.get_status(), iteration=measurement.curIteration)) - measurement.status = STATE_NONE - save(measurements) + + if measurement.status == STATE_FINISHED: + finished_measurements[measurement.name] = measurement + else: + measurement.status = STATE_NONE + + + save("measurements", measurements) if measurement.status == STATE_ITERATION_START: # this ensures that jobs do not go into idle if many measurements are done simultaneously # start bjobs print("APE Measurement {} just started iteration {}".format(measurement.name, measurement.curIteration)) measurement.submit_jobs() - save(measurements) + save("measurements", measurements) + + # Check if there are plots to do + changed = False + tempList = [] + for plot in resultPlots: + if plot.check_finished(finished_measurements): + plot.do_plot() + changed = True + with open(history_file, "a") as fi: + fi.write("Result plot {name} was created in folder {outPath}\n".format(name=plot.name, outPath=plot.outPath)) + else: + tempList.append(plot) + resultPlots = tempList + tempList = None + if changed: + save("resultPlots", resultPlots) - time_remaining = clock_interval - while time_remaining > 0: - print("Sleeping for {} seconds, you can safely [CTRL+C] now".format(time_remaining)) - time.sleep(1) - time_remaining -= 1 + if len(measurements) == 0: + print("No APE measurements are active, exiting") + break + + + try: # so that interrupting does not give an error message and just ends the program + time_remaining = clock_interval + while time_remaining > 0: + print("Sleeping for {} seconds, you can safely [CTRL+C] now".format(time_remaining)) + time.sleep(1) + time_remaining -= 1 + sys.stdout.write("\033[F") + sys.stdout.write("\033[K") + print("") sys.stdout.write("\033[F") sys.stdout.write("\033[K") - print("") - sys.stdout.write("\033[F") - sys.stdout.write("\033[K") + except KeyboardInterrupt: + sys.exit(0) + if __name__ == "__main__": main() diff --git a/Alignment/APEEstimation/test/autoSubmitter/autoSubmitterTemplates.py b/Alignment/APEEstimation/test/autoSubmitter/autoSubmitterTemplates.py index 14cae30720d9a..c5f52bf8aa18a 100644 --- a/Alignment/APEEstimation/test/autoSubmitter/autoSubmitterTemplates.py +++ b/Alignment/APEEstimation/test/autoSubmitter/autoSubmitterTemplates.py @@ -1,26 +1,43 @@ -bjobTemplate="""#!/bin/tcsh +condorJobTemplate="""#!/bin/tcsh -cd $CMSSW_BASE/src +set curDir=$PWD +echo $curDir +cd {base}/../.. 
eval `scramv1 runtime -csh` source /afs/cern.ch/cms/caf/setup.csh -cd - +cd $curDir xrdcp {inputFile} reco.root -cmsRun $CMSSW_BASE/src/Alignment/APEEstimation/test/cfgTemplate/apeEstimator_cfg.py {inputCommands} +cmsRun {base}/test/cfgTemplate/apeEstimator_cfg.py {inputCommands} -rm -- "$0" +rm reco.root """ -submitJobTemplate=""" -bsub -J {jobName} -e {errorFile} -o {outputFile} -q cmscaf1nd -R "rusage[pool=3000]" tcsh {jobFile} +condorSubTemplate=""" +Executable = {jobFile} +Universe = vanilla +Output = {outputFile} +Error = {errorFile} +Log = {logFile} +request_memory = 2000M +request_disk = 400M +batch_name = {jobName} ++JobFlavour = "longlunch" +Queue Arguments from ( +{arguments}) """ -checkJobTemplate="bjobs -noheader -a -J {jobName}" +condorArgumentTemplate="""{fileNumber} {inputFile} +""" + +submitCondorTemplate=""" +condor_submit {subFile} +""" -killJobTemplate="bkill -J {jobName}" +killJobTemplate="condor_rm {jobId}" summaryTemplate="cmsRun $CMSSW_BASE/src/Alignment/APEEstimation/test/cfgTemplate/apeEstimatorSummary_cfg.py {inputCommands}" @@ -30,12 +47,12 @@ conditionsFileHeader=""" import FWCore.ParameterSet.Config as cms +from CalibTracker.Configuration.Common.PoolDBESSource_cfi import poolDBESSource def applyConditions(process): """ conditionsTemplate=""" - import CalibTracker.Configuration.Common.PoolDBESSource_cfi - process.my{record}Conditions = CalibTracker.Configuration.Common.PoolDBESSource_cfi.poolDBESSource.clone( + process.my{record}Conditions = poolDBESSource.clone( connect = cms.string('{connect}'), toGet = cms.VPSet(cms.PSet(record = cms.string('{record}'), tag = cms.string('{tag}') diff --git a/Alignment/APEEstimation/test/autoSubmitter/config.ini b/Alignment/APEEstimation/test/autoSubmitter/config.ini index 6061f2874464b..21c8a5f5dfd21 100644 --- a/Alignment/APEEstimation/test/autoSubmitter/config.ini +++ b/Alignment/APEEstimation/test/autoSubmitter/config.ini @@ -32,6 +32,25 @@ condition TrackerSurfaceDeformationRcd=sqlite_file:/afs/asdf.db Deformations # name: dataset alignment # unique names are important as this name will be used as a folder name # where all relevant files are stored -# firstIteration and maxIterations are optional, 0 and 15 by default. -# maxIterations is forced to 0 if alignmentObject has isDesign=True -exampleName: exampleDataset alignmentObject firstIteration=0 maxIterations=15 +# additional arguments are added with spaces in the structure option=value +exampleName: exampleDataset alignmentObject +# Additional arguments can be (default arguments in brackets): +# firstIteration (0) +# maxIterations (15) +# maxEvents (-1 or from dataset) +# +# for automatic result plot: +# resultPlotDo (False) +# resultPlotTitle() +# resultPlotOutPath(path of measurement) + + +[resultplot:Name] +# wait for these measurements to finish (have to be run in same instance) +wait label:exampleName +# load APE from these files (in labels ~ is replaced with whitespaces) +load label~with~empty~spaces:/path/to/allData_iterationApe.root +# output path. If not defined, store in hists/Name +outPath=/asdf/ +# title to the plot, optional +title=Whatever title you want diff --git a/Alignment/APEEstimation/test/batch/startSkim.py b/Alignment/APEEstimation/test/batch/startSkim.py index 153fdd88a9732..dd1d83e6c9dd4 100644 --- a/Alignment/APEEstimation/test/batch/startSkim.py +++ b/Alignment/APEEstimation/test/batch/startSkim.py @@ -121,6 +121,8 @@ def main(argv): help="Name of sample as defined in skimProducer_cfg.py. 
Multiple inputs possible") parser.add_argument("-c", "--consecutive", action="store_true", dest="consecutive", default=False, help="Do consecutive instead of parallel skims") + parser.add_argument("-n", "--ncores", action="store", dest="ncores", default=-1, type="int", + help="Set maximum number of parallel skims to run") args = parser.parse_args() @@ -135,12 +137,15 @@ def main(argv): args.samples = finalSamples + if args.ncores<0 or args.ncores > len(args.samples): + args.ncores = len(args.samples) + if len(args.samples) == 1 or args.consecutive: for sample in args.samples: doSkim(sample) else: try: - pool = mp.Pool(len(args.samples)) + pool = mp.Pool(args.ncores) pool.map_async(doSkim, args.samples) pool.close() pool.join() diff --git a/Alignment/APEEstimation/test/cfgTemplate/batchSubmitTemplate.tcsh b/Alignment/APEEstimation/test/cfgTemplate/batchSubmitTemplate.tcsh deleted file mode 100644 index 96691df830047..0000000000000 --- a/Alignment/APEEstimation/test/cfgTemplate/batchSubmitTemplate.tcsh +++ /dev/null @@ -1,27 +0,0 @@ -#!/bin/tcsh - - -cd $CMSSW_BASE/src -#if [[ "$SHELL" == /bin/sh || "$SHELL" == /bin/bash || "$SHELL" == /bin/zsh ]] ; then -# eval `scram runtime -sh` -#elif [[ "$SHELL" == /bin/csh || "$SHELL" == /bin/tcsh ]] ; then -eval `scram runtime -csh` -#else -# echo "Unknown shell: $SHELL" -# echo "cannot set CMSSW environment, stop processing" -# exit 5 -#fi -source /afs/cern.ch/cms/caf/setup.csh -cd - - - -xrdcp _THE_INPUTBASE__THE_NUMBER_.root reco.root - - -cmsRun $CMSSW_BASE/src/Alignment/APEEstimation/test/cfgTemplate/apeEstimator_cfg.py_THE_COMMANDS_ - - -rm -- "$0" - - - diff --git a/Alignment/APEEstimation/test/cfgTemplate/createStep2.bash b/Alignment/APEEstimation/test/cfgTemplate/createStep2.bash deleted file mode 100644 index 4b9419d8b8cf4..0000000000000 --- a/Alignment/APEEstimation/test/cfgTemplate/createStep2.bash +++ /dev/null @@ -1,94 +0,0 @@ -#!/bin/bash - - - -if [ ! $# -ge 1 ]; then - echo "Usage: $0 iterationNumber" - echo "Usage: $0 iterationNumber setBaseline" - exit 1 -fi - -iterationNumber="$1" -setBaseline="False" -if [ $# == 2 ]; then - setBaseline="$2"; - if [[ ! "$setBaseline" == False ]] && [[ ! 
"$setBaseline" == True ]] ; then - echo "Invalid argument for setBaseline: $setBaseline" - exit 2 - fi -fi - -echo "Iteration number: $1" -echo "Set Baseline: ${setBaseline}" -echo - - - - -####################################################### -## Config for summary step -cmsRunOptions=" iterNumber=$iterationNumber setBaseline=$setBaseline" -echo "$cmsRunOptions" - -summaryTemplateFile="${CMSSW_BASE}/src/Alignment/APEEstimation/test/cfgTemplate/summaryTemplate.bash" - -summaryFile="${CMSSW_BASE}/src/Alignment/APEEstimation/test/batch/workingArea/summary.bash" -cat $summaryTemplateFile |sed "s/_THE_COMMANDS_/${cmsRunOptions}/g" > $summaryFile - - - - - - - - -####################################################### -## Create final output directory - - -ROOTFILEBASE="$CMSSW_BASE/src/Alignment/APEEstimation/hists" - -if [[ "$setBaseline" == True ]] ; then - fileDir="${ROOTFILEBASE}/Design/baseline" - - # If there is already output from previous studies, move it - if [ -d "${fileDir}" ] ; then - mv ${fileDir} ${ROOTFILEBASE}/Design/baseline_old ; - fi - mkdir ${ROOTFILEBASE}/Design - mkdir ${fileDir} -else - fileDir="${ROOTFILEBASE}/workingArea/iter${iterationNumber}" - - # If there is already output from previous studies, move it - if [ -d "${fileDir}" ] ; then - mv ${fileDir} ${ROOTFILEBASE}/workingArea/iter${iterationNumber}_old - fi - if [ -a /afs/cern.ch/user/h/hauk/scratch0/apeStudies/apeObjects/apeIter${iterationNumber}.db ] ; then - mv /afs/cern.ch/user/h/hauk/scratch0/apeStudies/apeObjects/apeIter${iterationNumber}.db /afs/cern.ch/user/h/hauk/scratch0/apeStudies/apeObjects/apeIter${iterationNumber}_old.db - fi - mkdir ${fileDir} - - if [ "$iterationNumber" -ne 0 ] ; then - declare -i nIterDecrement=${iterationNumber}-1 - cp ${ROOTFILEBASE}/workingArea/iter${nIterDecrement}/allData_iterationApe.root ${fileDir}/. - fi -fi - - - - - - -####################################################### -## Add root files from step1 and delete them, keep only summed file - - -hadd ${fileDir}/allData.root ${ROOTFILEBASE}/workingArea/*.root -if [ $? 
-eq 0 ] ; then - rm ${ROOTFILEBASE}/workingArea/*.root -fi - - - - diff --git a/Alignment/APEEstimation/test/cfgTemplate/startStep1.bash b/Alignment/APEEstimation/test/cfgTemplate/startStep1.bash deleted file mode 100644 index 9a489115be0e3..0000000000000 --- a/Alignment/APEEstimation/test/cfgTemplate/startStep1.bash +++ /dev/null @@ -1,15 +0,0 @@ -#!/bin/bash - - -declare -i counter=1 - -for file in $CMSSW_BASE/src/Alignment/APEEstimation/test/batch/workingArea/*.tcsh; - -do - - bsub -J job${counter} -e error${counter}.txt -o output${counter}.txt -q cmscaf1nd -R "rusage[pool=3000]" tcsh $file - - counter=$counter+1 - -done - diff --git a/Alignment/APEEstimation/test/cfgTemplate/startStep2.bash b/Alignment/APEEstimation/test/cfgTemplate/startStep2.bash deleted file mode 100644 index 3eff73544fe44..0000000000000 --- a/Alignment/APEEstimation/test/cfgTemplate/startStep2.bash +++ /dev/null @@ -1,14 +0,0 @@ -#!/bin/bash - - - -base="$CMSSW_BASE/src/Alignment/APEEstimation/test/batch/workingArea" - - - -bash $base/summary.bash - - - -rm $base/*.bash - diff --git a/Alignment/APEEstimation/test/cfgTemplate/summaryTemplate.bash b/Alignment/APEEstimation/test/cfgTemplate/summaryTemplate.bash deleted file mode 100644 index 521069e1b1df3..0000000000000 --- a/Alignment/APEEstimation/test/cfgTemplate/summaryTemplate.bash +++ /dev/null @@ -1,27 +0,0 @@ -#!/bin/bash - - -base="$CMSSW_BASE/src/Alignment/APEEstimation/test/cfgTemplate" - - - -cmsRun $base/apeEstimatorSummary_cfg.py_THE_COMMANDS_ - - -cmsRun $base/apeLocalSetting_cfg.py_THE_COMMANDS_ - - - -if [ $? -eq 0 ] ; then - echo "\nAPE DB-Object created" -else - echo "\nNo APE DB-Object created" -fi - -if [ -a alignment.log ] ; then - rm alignment.log -fi - - - - diff --git a/Alignment/APEEstimation/test/cfgTemplate/writeSubmitScript.sh b/Alignment/APEEstimation/test/cfgTemplate/writeSubmitScript.sh deleted file mode 100644 index 482fb5cc1465b..0000000000000 --- a/Alignment/APEEstimation/test/cfgTemplate/writeSubmitScript.sh +++ /dev/null @@ -1,44 +0,0 @@ -#!/bin/tcsh - - - -## Batch submit file template -BATCH_TEMPLATEFILE="${CMSSW_BASE}/src/Alignment/APEEstimation/test/cfgTemplate/batchSubmitTemplate.tcsh" -BATCH_OUTPUTBASE1="${CMSSW_BASE}/src/Alignment/APEEstimation/test/batch/workingArea/${datasetName}BatchSubmit" -BATCH_OUTPUTSUFFIX=".tcsh" - -helpFile1="help1.txt" -cat $BATCH_TEMPLATEFILE |sed "s/_THE_INPUTBASE_/root:\/\/eoscms\/\/eos\/cms\/${inputBase}/g" > $helpFile1 - - - - -## increment counter -declare -i counter1=1 - -## number of files to create (maximum value of counter!!!) -while [ $counter1 -le ${nFiles} ] -do - cmsRunOptions=" sample=$datasetName fileNumber=$counter1 iterNumber=$iterationNumber lastIter=$lastIteration alignRcd=$alignmentRcd" - #echo "$cmsRunOptions" - - helpFile2="help2.txt" - cat $helpFile1 |sed "s/_THE_COMMANDS_/${cmsRunOptions}/g" > $helpFile2 - - theBatchFilename="${BATCH_OUTPUTBASE1}${counter1}${BATCH_OUTPUTSUFFIX}" - cat $helpFile2 |sed "s/_THE_NUMBER_/${counter1}/g" > $theBatchFilename - - counter1=$counter1+1 -done - - -echo "Sample, number of files: $datasetName, $nFiles" - - -rm $helpFile1 -rm $helpFile2 - - - - - diff --git a/Alignment/APEEstimation/test/cfgTemplateData/createStep1.bash b/Alignment/APEEstimation/test/cfgTemplateData/createStep1.bash deleted file mode 100644 index 31561dacac61b..0000000000000 --- a/Alignment/APEEstimation/test/cfgTemplateData/createStep1.bash +++ /dev/null @@ -1,64 +0,0 @@ -#!/bin/bash - - - -if [ ! 
$# -ge 1 ]; then - echo "Usage: $0 iterationNumber" - echo "Usage: $0 iterationNumber lastIteration" - exit 1 -fi - -export iterationNumber="$1" -export lastIteration="False" -if [ $# == 2 ]; then - lastIteration="$2"; - if [[ ! "$lastIteration" == False ]] && [[ ! "$lastIteration" == True ]] ; then - echo "Invalid argument for lastIteration: $lastIteration" - exit 2 - fi -fi - -echo "Iteration number: $1" -echo "LastIteration: ${lastIteration}" -echo - - - - - -## Alignment -#export alignmentRcd="globalTag" -#~ export alignmentRcd="mp1791" -export alignmentRcd="hp1370" - -echo "Alignment Record: $alignmentRcd" -echo - - - -## Script to create submit scripts for specific dataset -createStep1="${CMSSW_BASE}/src/Alignment/APEEstimation/test/cfgTemplate/writeSubmitScript.sh" - -## identification name of dataset -export datasetName -## number of input files -export nFiles -## Input file base -cafDir="\/store\/caf\/user\/cschomak\/SingleMu2015RunB" -cafDir2="\/store\/caf\/user\/cschomak\/DoubleMu2015RunB" -export inputBase - - -datasetName="data1" -inputBase="${cafDir}\/DataSingleMuonRun2015BPromptReco" -nFiles=9 -bash $createStep1 $datasetName $nFiles $iterationNumber $lastIteration $alignmentRcd $inputBase - - -datasetName="data2" -inputBase="${cafDir2}\/DataDoubleMuonRun2015BPromptReco" -nFiles=1 -bash $createStep1 $datasetName $nFiles $iterationNumber $lastIteration $alignmentRcd $inputBase - - - diff --git a/Alignment/APEEstimation/test/cfgTemplateData/manual_run10Iterations.bash b/Alignment/APEEstimation/test/cfgTemplateData/manual_run10Iterations.bash deleted file mode 100644 index 2890c6957854e..0000000000000 --- a/Alignment/APEEstimation/test/cfgTemplateData/manual_run10Iterations.bash +++ /dev/null @@ -1,19 +0,0 @@ -#!/bin/bash - -COUNTER="0" - -# if data samples are larger switch to longer sleep times -while [ $COUNTER -lt 10 ]; do - bash ../createStep1.bash $COUNTER - bash ../startStep1.bash - sleep 15m - bash ../createStep2.bash $COUNTER - bash ../startStep2.bash - let COUNTER=COUNTER+1 -done - -bash ../createStep1.bash 10 True -bash ../startStep1.bash -sleep 15m -bash ../createStep2.bash 10 -bash ../startStep2.bash diff --git a/Alignment/APEEstimation/test/cfgTemplateData/manual_run15Iterations.bash b/Alignment/APEEstimation/test/cfgTemplateData/manual_run15Iterations.bash deleted file mode 100644 index cf3bc813dbc03..0000000000000 --- a/Alignment/APEEstimation/test/cfgTemplateData/manual_run15Iterations.bash +++ /dev/null @@ -1,19 +0,0 @@ -#!/bin/bash - -COUNTER="0" - -# if data samples are larger switch to longer sleep times -while [ $COUNTER -lt 15 ]; do - bash ../createStep1.bash $COUNTER - bash ../startStep1.bash - sleep 15m - bash ../createStep2.bash $COUNTER - bash ../startStep2.bash - let COUNTER=COUNTER+1 -done - -bash ../createStep1.bash 15 True -bash ../startStep1.bash -sleep 15m -bash ../createStep2.bash 15 -bash ../startStep2.bash diff --git a/Alignment/APEEstimation/test/cfgTemplateData/run10Iterations.bash b/Alignment/APEEstimation/test/cfgTemplateData/run10Iterations.bash deleted file mode 100644 index e23e55d6004f8..0000000000000 --- a/Alignment/APEEstimation/test/cfgTemplateData/run10Iterations.bash +++ /dev/null @@ -1,58 +0,0 @@ -#!/bin/bash - -COUNTER="0" - -nf=$(cat ../createStep1.bash | awk '/nFiles/{i++}i==2' | cut -d= -f 2) -echo "$nf files to be sent to job queue" - -do_files_exist(){ - fsExist=true - for ((index=1;index<=$1;index++)) - do - if [ -e "error${index}.txt" ]; then - : - else - fsExist=false - fi - done - -} - - -# if data samples are larger 
switch to longer sleep times -rm error* output* -while [ $COUNTER -lt 10 ]; do - bash ../createStep1.bash $COUNTER - bash ../startStep1.bash - - while : - do - fsExist=false - sleep 1m - do_files_exist $nf - if [ $fsExist = true ]; then - break - fi - done - sleep 1m - bash ../createStep2.bash $COUNTER - bash ../startStep2.bash - let COUNTER=COUNTER+1 - rm error* output* -done - -bash ../createStep1.bash 10 True -bash ../startStep1.bash - -while : -do - sleep 1m - do_files_exist $nf - if [ $fsExist = true ]; then - break - fi -done - -sleep 1m -bash ../createStep2.bash 10 -bash ../startStep2.bash diff --git a/Alignment/APEEstimation/test/cfgTemplateData/run15Iterations.bash b/Alignment/APEEstimation/test/cfgTemplateData/run15Iterations.bash deleted file mode 100644 index d012978c3757f..0000000000000 --- a/Alignment/APEEstimation/test/cfgTemplateData/run15Iterations.bash +++ /dev/null @@ -1,58 +0,0 @@ -#!/bin/bash - -COUNTER="0" - -nf=$(cat ../createStep1.bash | awk '/nFiles/{i++}i==2' | cut -d= -f 2) -echo "$nf files to be sent to job queue" - -do_files_exist(){ - fsExist=true - for ((index=1;index<=$1;index++)) - do - if [ -e "error${index}.txt" ]; then - : - else - fsExist=false - fi - done - -} - - -# if data samples are larger switch to longer sleep times -rm error* output* -while [ $COUNTER -lt 15 ]; do - bash ../createStep1.bash $COUNTER - bash ../startStep1.bash - - while : - do - fsExist=false - sleep 1m - do_files_exist $nf - if [ $fsExist = true ]; then - break - fi - done - sleep 1m - bash ../createStep2.bash $COUNTER - bash ../startStep2.bash - let COUNTER=COUNTER+1 - rm error* output* -done - -bash ../createStep1.bash 15 True -bash ../startStep1.bash - -while : -do - sleep 1m - do_files_exist $nf - if [ $fsExist = true ]; then - break - fi -done - -sleep 1m -bash ../createStep2.bash 15 -bash ../startStep2.bash diff --git a/Alignment/APEEstimation/test/cfgTemplateDesign/createStep1.bash b/Alignment/APEEstimation/test/cfgTemplateDesign/createStep1.bash deleted file mode 100644 index eb59100ab3c6b..0000000000000 --- a/Alignment/APEEstimation/test/cfgTemplateDesign/createStep1.bash +++ /dev/null @@ -1,54 +0,0 @@ -#!/bin/bash - - - -if [ ! $# -ge 1 ]; then - echo "Usage: $0 iterationNumber" - echo "Usage: $0 iterationNumber lastIteration" - exit 1 -fi - -export iterationNumber="$1" -export lastIteration="False" -if [ $# == 2 ]; then - lastIteration="$2"; - if [[ ! "$lastIteration" == False ]] && [[ ! 
"$lastIteration" == True ]] ; then - echo "Invalid argument for lastIteration: $lastIteration" - exit 2 - fi -fi - -echo "Iteration number: $1" -echo "LastIteration: ${lastIteration}" -echo - - - - - -## Alignment -export alignmentRcd="design" -#export alignmentRcd="idealAligned" -echo "Alignment Record: $alignmentRcd" -echo - - - -## Script to create submit scripts for specific dataset -createStep1="${CMSSW_BASE}/src/Alignment/APEEstimation/test/cfgTemplate/writeSubmitScript.sh" - -## identification name of dataset -export datasetName -## number of input files -export nFiles -## Input file base -cafDir="\/store\/caf\/user\/cschomak\/Skims\/MC\/PhaseI\/DY" -export inputBase - -datasetName="MC_TkAlZMuMu_PhaseI_Fall16_81X_DY_" -inputBase="${cafDir}\/${datasetName}" -nFiles=1 -bash $createStep1 $datasetName $nFiles $iterationNumber $lastIteration $alignmentRcd $inputBase - - - diff --git a/Alignment/APEEstimation/test/cfgTemplateMc/createStep1.bash b/Alignment/APEEstimation/test/cfgTemplateMc/createStep1.bash deleted file mode 100644 index 73bb6dac22ee9..0000000000000 --- a/Alignment/APEEstimation/test/cfgTemplateMc/createStep1.bash +++ /dev/null @@ -1,52 +0,0 @@ -#!/bin/bash - - - -if [ ! $# -ge 1 ]; then - echo "Usage: $0 iterationNumber" - echo "Usage: $0 iterationNumber lastIteration" - exit 1 -fi - -export iterationNumber="$1" -export lastIteration="False" -if [ $# == 2 ]; then - lastIteration="$2"; - if [[ ! "$lastIteration" == False ]] && [[ ! "$lastIteration" == True ]] ; then - echo "Invalid argument for lastIteration: $lastIteration" - exit 2 - fi -fi - -echo "Iteration number: $1" -echo "LastIteration: ${lastIteration}" -echo - - - - - -## Alignment -#~ export alignmentRcd="globalTag" -export alignmentRcd="misalTest" -#export alignmentRcd="idealAligned" -echo "Alignment Record: $alignmentRcd" -echo - - - -## Script to create submit scripts for specific dataset -createStep1="${CMSSW_BASE}/src/Alignment/APEEstimation/test/cfgTemplate/writeSubmitScript.sh" - -## identification name of dataset -export datasetName -## number of input files -export nFiles -## Input file base -cafDir="\/store\/caf\/user\/cschomak\/Skims\/MC\/PhaseI\/DY" -export inputBase - -datasetName="MC_TkAlZMuMu_PhaseI_Fall16_81X_DY_" -inputBase="${cafDir}\/${datasetName}" -nFiles=1 -bash $createStep1 $datasetName $nFiles $iterationNumber $lastIteration $alignmentRcd $inputBase diff --git a/Alignment/APEEstimation/test/cfgTemplateMc/manual_run10Iterations.bash b/Alignment/APEEstimation/test/cfgTemplateMc/manual_run10Iterations.bash deleted file mode 100644 index 2890c6957854e..0000000000000 --- a/Alignment/APEEstimation/test/cfgTemplateMc/manual_run10Iterations.bash +++ /dev/null @@ -1,19 +0,0 @@ -#!/bin/bash - -COUNTER="0" - -# if data samples are larger switch to longer sleep times -while [ $COUNTER -lt 10 ]; do - bash ../createStep1.bash $COUNTER - bash ../startStep1.bash - sleep 15m - bash ../createStep2.bash $COUNTER - bash ../startStep2.bash - let COUNTER=COUNTER+1 -done - -bash ../createStep1.bash 10 True -bash ../startStep1.bash -sleep 15m -bash ../createStep2.bash 10 -bash ../startStep2.bash diff --git a/Alignment/APEEstimation/test/cfgTemplateMc/manual_run15Iterations.bash b/Alignment/APEEstimation/test/cfgTemplateMc/manual_run15Iterations.bash deleted file mode 100644 index cf3bc813dbc03..0000000000000 --- a/Alignment/APEEstimation/test/cfgTemplateMc/manual_run15Iterations.bash +++ /dev/null @@ -1,19 +0,0 @@ -#!/bin/bash - -COUNTER="0" - -# if data samples are larger switch to longer sleep times -while [ 
$COUNTER -lt 15 ]; do - bash ../createStep1.bash $COUNTER - bash ../startStep1.bash - sleep 15m - bash ../createStep2.bash $COUNTER - bash ../startStep2.bash - let COUNTER=COUNTER+1 -done - -bash ../createStep1.bash 15 True -bash ../startStep1.bash -sleep 15m -bash ../createStep2.bash 15 -bash ../startStep2.bash diff --git a/Alignment/APEEstimation/test/cfgTemplateMc/run10Iterations.bash b/Alignment/APEEstimation/test/cfgTemplateMc/run10Iterations.bash deleted file mode 100644 index e23e55d6004f8..0000000000000 --- a/Alignment/APEEstimation/test/cfgTemplateMc/run10Iterations.bash +++ /dev/null @@ -1,58 +0,0 @@ -#!/bin/bash - -COUNTER="0" - -nf=$(cat ../createStep1.bash | awk '/nFiles/{i++}i==2' | cut -d= -f 2) -echo "$nf files to be sent to job queue" - -do_files_exist(){ - fsExist=true - for ((index=1;index<=$1;index++)) - do - if [ -e "error${index}.txt" ]; then - : - else - fsExist=false - fi - done - -} - - -# if data samples are larger switch to longer sleep times -rm error* output* -while [ $COUNTER -lt 10 ]; do - bash ../createStep1.bash $COUNTER - bash ../startStep1.bash - - while : - do - fsExist=false - sleep 1m - do_files_exist $nf - if [ $fsExist = true ]; then - break - fi - done - sleep 1m - bash ../createStep2.bash $COUNTER - bash ../startStep2.bash - let COUNTER=COUNTER+1 - rm error* output* -done - -bash ../createStep1.bash 10 True -bash ../startStep1.bash - -while : -do - sleep 1m - do_files_exist $nf - if [ $fsExist = true ]; then - break - fi -done - -sleep 1m -bash ../createStep2.bash 10 -bash ../startStep2.bash diff --git a/Alignment/APEEstimation/test/cfgTemplateMc/run15Iterations.bash b/Alignment/APEEstimation/test/cfgTemplateMc/run15Iterations.bash deleted file mode 100644 index d012978c3757f..0000000000000 --- a/Alignment/APEEstimation/test/cfgTemplateMc/run15Iterations.bash +++ /dev/null @@ -1,58 +0,0 @@ -#!/bin/bash - -COUNTER="0" - -nf=$(cat ../createStep1.bash | awk '/nFiles/{i++}i==2' | cut -d= -f 2) -echo "$nf files to be sent to job queue" - -do_files_exist(){ - fsExist=true - for ((index=1;index<=$1;index++)) - do - if [ -e "error${index}.txt" ]; then - : - else - fsExist=false - fi - done - -} - - -# if data samples are larger switch to longer sleep times -rm error* output* -while [ $COUNTER -lt 15 ]; do - bash ../createStep1.bash $COUNTER - bash ../startStep1.bash - - while : - do - fsExist=false - sleep 1m - do_files_exist $nf - if [ $fsExist = true ]; then - break - fi - done - sleep 1m - bash ../createStep2.bash $COUNTER - bash ../startStep2.bash - let COUNTER=COUNTER+1 - rm error* output* -done - -bash ../createStep1.bash 15 True -bash ../startStep1.bash - -while : -do - sleep 1m - do_files_exist $nf - if [ $fsExist = true ]; then - break - fi -done - -sleep 1m -bash ../createStep2.bash 15 -bash ../startStep2.bash diff --git a/Alignment/APEEstimation/test/plottingTools/drawIterations.py b/Alignment/APEEstimation/test/plottingTools/drawIterations.py new file mode 100644 index 0000000000000..c45903f8443ff --- /dev/null +++ b/Alignment/APEEstimation/test/plottingTools/drawIterations.py @@ -0,0 +1,17 @@ +# Implementation to draw all iterations of an APE measurement +# to check convergence + +from iterationsPlotter import * +from granularity import * + +try: + base = os.environ['CMSSW_BASE']+"/src/Alignment/APEEstimation" +except KeyError: + base = "" + +plot = IterationsPlotter() +plot.setOutputPath(base+"/workingArea/") +plot.setInputFile(base+"/workingArea/iter14/allData_iterationApe.root") +plot.setTitle("") +plot.setGranularity(standardGranularity) 
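+# standardGranularity is defined in granularity.py: it groups the 68 x sectors into
+# PIXEL, TIB, TOB, TID and TEC ranges and restricts the y coordinate to the PIXEL sectors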
+plot.draw() diff --git a/Alignment/APEEstimation/test/plottingTools/drawResults.py b/Alignment/APEEstimation/test/plottingTools/drawResults.py new file mode 100644 index 0000000000000..3fa306931d2d4 --- /dev/null +++ b/Alignment/APEEstimation/test/plottingTools/drawResults.py @@ -0,0 +1,21 @@ +# Implementation to draw all iterations of an APE measurement +# to check convergence + +import ROOT +from resultPlotter import * +from systematics import * +from granularity import * + +try: + base = os.environ['CMSSW_BASE']+"/src/Alignment/APEEstimation" +except KeyError: + base = "" + +plot = ResultPlotter() +plot.setOutputPath(base+"/workingArea/") +# internal name (used for example when adding systematic errors), path to file, label, color (optional) +plot.addInputFile("placeholder1", base+"/workingArea/iter14/allData_iterationApe.root", "measurement A") +plot.addInputFile("placeholder2", base+"/workingArea2/iter14/allData_iterationApe.root", "measurement B", ROOT.kRed) +plot.setTitle("") +plot.setGranularity(standardGranularity) +plot.draw() diff --git a/Alignment/APEEstimation/test/plottingTools/granularity.py b/Alignment/APEEstimation/test/plottingTools/granularity.py new file mode 100644 index 0000000000000..98f4f01cf22a2 --- /dev/null +++ b/Alignment/APEEstimation/test/plottingTools/granularity.py @@ -0,0 +1,20 @@ +class Granularity: + def __init__(self): + self.sectors = {} + self.names = {} + self.sectors["X"] = [] + self.names["X"] = [] + self.sectors["Y"] = [] + self.names["Y"] = [] + +# manually create different granularities here +standardGranularity = Granularity() +standardGranularity.sectors["X"].append( (1, 14)) # BPIX/FPIX +standardGranularity.sectors["X"].append( (15, 26)) # TIB +standardGranularity.sectors["X"].append( (27, 38)) # TOB +standardGranularity.sectors["X"].append( (39, 48)) # TID +standardGranularity.sectors["X"].append( (49, 68)) # TEC +standardGranularity.names["X"] = ["PIXEL", "TIB", "TOB", "TID", "TEC"] + +standardGranularity.sectors["Y"].append( (1, 14)) # BPIX/FPIX +standardGranularity.names["Y"] = ["PIXEL",] diff --git a/Alignment/APEEstimation/test/plottingTools/iterationsPlotter.py b/Alignment/APEEstimation/test/plottingTools/iterationsPlotter.py new file mode 100644 index 0000000000000..5c38a8f52d073 --- /dev/null +++ b/Alignment/APEEstimation/test/plottingTools/iterationsPlotter.py @@ -0,0 +1,129 @@ +import ROOT +ROOT.gROOT.SetBatch(True) +from setTDRStyle import setTDRStyle + +# load some default things form there +from granularity import * + + +class IterationsPlotter: + def __init__(self): + setTDRStyle() + self.inFile = None + self.outPath = None + self.granularity = standardGranularity + self.title = "" + + def setInputFile(self, inFile): + self.inFile = inFile + + def setGranularity(self, granularity): + self.granularity = granularity + + def setOutputPath(self, outPath): + self.outPath = outPath + + def setTitle(self, title): + self.title = title + + def convertName(self, name): + out = name.replace("Bpix", "BPIX") + out = out.replace("Fpix", "FPIX") + out = out.replace("Plus", "+") + out = out.replace("Minus", "-") + out = out.replace("Fpix", "FPIX") + out = out.replace("Tib", "TIB") + out = out.replace("Tob", "TOB") + out = out.replace("Tid", "TID") + out = out.replace("Tec", "TEC") + out = out.replace("Layer", " L") + out = out.replace("Ring", " R") + out = out.replace("Stereo", "S") + out = out.replace("Rphi", "R") # different from Ring, this one does not add a space in front + out = out.replace("In", "i") + out = out.replace("Out", "o") + 
return out + + def makeHists(self, sectorRange, coordinate): + sectors = range(sectorRange[0],sectorRange[1]+1) + numSectors = len(sectors) + + fi = ROOT.TFile(self.inFile, "READ") + nameTree = fi.Get("nameTree") + nameTree.GetEntry(0) + apeTree = fi.Get("iterTree{}".format(coordinate)) + noEntries = apeTree.GetEntries() + + hists = [] + names = [] + maximum = 10 + for i,sector in enumerate(sectors): + hist = ROOT.TH1F("hist{}_{}".format(coordinate, sector), "", noEntries, 0-0.5, noEntries-0.5) + hist.SetTitle(";iteration number;#sigma_{align," + coordinate.lower() + "} [#mum]") + #~ hist.SetAxisRange(0.,100.,"Y") + hist.SetMarkerStyle(20+i) + hist.SetDirectory(0) + hists.append(hist) + no_it = 1 + for it in apeTree: + hist.SetBinContent(no_it, 10000. * (float(getattr(it, "Ape_Sector_{}".format(sector))))**0.5) + no_it += 1 + if hist.GetMaximum() > maximum: + maximum = hist.GetMaximum() + + sectorName = self.convertName(str(getattr(nameTree, "Ape_Sector_{}".format(sector)))) + names.append(sectorName) + + + + fi.Close() + return hists, names, maximum + + def draw(self): + for coordinate in self.granularity.sectors.keys(): + rangeList = self.granularity.sectors[coordinate] + for j, sectorRange in enumerate(rangeList): + self.canvas = ROOT.TCanvas("canvas", "canvas", int(ROOT.gStyle.GetCanvasDefW()*15/10.),ROOT.gStyle.GetCanvasDefH()) + ROOT.gPad.SetRightMargin(0.10) + + legend = ROOT.TLegend(0.2,0.73,0.85,0.93) + legend.SetFillColor(0) + legend.SetFillStyle(0) + legend.SetTextSize(0.025) + legend.SetMargin(0.30) + legend.SetBorderSize(0) + legend.SetNColumns(4) + + hists, names, maximum = self.makeHists(sectorRange, coordinate) + for i, hist in enumerate(hists): + if i == 0: + drawOption = "P0L" + else: + drawOption = "P0Lsame" + hist.SetMaximum(maximum*1.5) + hist.Draw(drawOption) + legend.AddEntry(hist, names[i], "PL") + legend.Draw() + + cmsText = ROOT.TLatex(0.16,0.96,self.title) + cmsText.SetTextFont(42) + cmsText.SetNDC() + cmsText.Draw("same") + + granularityText = ROOT.TLatex(0.9,0.96,self.granularity.names[coordinate][j]) + granularityText.SetTextAlign(31) + granularityText.SetTextFont(42) + granularityText.SetNDC() + granularityText.Draw("same") + + import os + if not os.path.isdir(self.outPath): + os.makedirs(self.outPath) + + self.canvas.SaveAs("{}/iterations_{}_{}.pdf".format(self.outPath, coordinate, self.granularity.names[coordinate][j])) + self.canvas = None +def main(): + pass + +if __name__ == "__main__": + main() diff --git a/Alignment/APEEstimation/test/plottingTools/resultPlotter.py b/Alignment/APEEstimation/test/plottingTools/resultPlotter.py new file mode 100644 index 0000000000000..1be503f44f2b4 --- /dev/null +++ b/Alignment/APEEstimation/test/plottingTools/resultPlotter.py @@ -0,0 +1,186 @@ +import ROOT +ROOT.gROOT.SetBatch(True) +from setTDRStyle import setTDRStyle + +from systematicErrors import * +from granularity import * + + +class ResultPlotter: + def __init__(self): + setTDRStyle() + self.names = {} + self.inFiles = {} + self.labels = {} + self.colors = {} + self.outPath = None + self.hasSystematics = {} + self.systematics = {} + self.granularity = standardGranularity + self.title = "" + + def addInputFile(self, name, inFile, label, color=None): + self.names[label] = name + self.inFiles[label] = inFile + self.labels[label] = label + self.systematics[name] = [] + self.hasSystematics[name] = False + if color != None: + self.colors[label] = color + else: + # choose first not occupied color (other than white) + for autoColor in range(1,100): + if autoColor not in 
self.colors.values() and not autoColor == 10: + self.colors[label] = autoColor + break + + def setGranularity(self, granularity): + self.granularity = granularity + + def setOutputPath(self, outPath): + self.outPath = outPath + + def setTitle(self, title): + self.title = title + + def doSystematics(self, name): + self.hasSystematics[name] = True + + def addSystematics(self, name, systematics, additive=True): + self.hasSystematics[name] = True + if not additive: + self.systematics[name] = [] + self.systematics[name].append(systematics) + + def convertName(self, name): + out = name.replace("Bpix", "BPIX") + out = out.replace("Fpix", "FPIX") + out = out.replace("Plus", "+") + out = out.replace("Minus", "-") + out = out.replace("Fpix", "FPIX") + out = out.replace("Tib", "TIB") + out = out.replace("Tob", "TOB") + out = out.replace("Tid", "TID") + out = out.replace("Tec", "TEC") + out = out.replace("Layer", " L") + out = out.replace("Ring", " R") + out = out.replace("Stereo", "S") + out = out.replace("Rphi", "R") # different from Ring, this one does not add a space in front + out = out.replace("In", "i") + out = out.replace("Out", "o") + return out + + def makeHist(self, name, sectorRange, coordinate, number): + sectors = range(sectorRange[0],sectorRange[1]+1) + numSectors = len(sectors) + + hist = ROOT.TH1F("{}hist{}_{}".format(name, number, coordinate), "", numSectors, 0, numSectors) + hist.SetTitle(";;#sigma_{align," + coordinate.lower() + "} [#mum]") + hist.SetAxisRange(0.,100.,"Y") + + syst = None + if self.hasSystematics[name]: + syst = ROOT.TGraphAsymmErrors() + + fi = ROOT.TFile(self.inFiles[name], "READ") + nameTree = fi.Get("nameTree") + apeTree = fi.Get("iterTree{}".format(coordinate)) + # Get last entries in branches (for apeTree) to retrieve result of this iteration + # in iterTreeX/Y, the results of the previous iterations are also stored + nameTree.GetEntry(0) + apeTree.GetEntry(apeTree.GetEntries()-1) + iBin = 1 + for sector in sectors: + sectorApe = 10000. 
* (float(getattr(apeTree, "Ape_Sector_{}".format(sector))))**0.5 + sectorName = self.convertName(str(getattr(nameTree, "Ape_Sector_{}".format(sector)))) + hist.SetBinContent(iBin, sectorApe) + hist.SetBinError(iBin, 0.0000001) + hist.GetXaxis().SetBinLabel(iBin, sectorName) + + if self.hasSystematics[name]: + sysErrUp = 0 + sysErrDn = 0 + # add up errors quadratically + for partError in self.systematics[name]: + scaleFac = 1.0 + if partError.isRelative[sector-1]: + scaleFac = sectorApe + + if partError.direction[sector-1] == DIR_BOTH: + sysErrUp += (scaleFac*partError[coordinate][sector-1])**2 + sysErrDn += (scaleFac*partError[coordinate][sector-1])**2 + elif partError.direction[sector-1] == DIR_DOWN: + sysErrDn += (scaleFac*partError[coordinate][sector-1])**2 + elif partError.direction[sector-1] == DIR_UP: + sysErrUp += (scaleFac*partError[coordinate][sector-1])**2 + sysErrUp = sysErrUp**0.5 + sysErrDn = sysErrDn**0.5 + binWidth = hist.GetXaxis().GetBinCenter(iBin) - hist.GetXaxis().GetBinLowEdge(iBin) + syst.SetPoint(iBin, hist.GetXaxis().GetBinCenter(iBin), sectorApe) + syst.SetPointError(iBin, binWidth, binWidth, sysErrDn, sysErrUp) + + iBin += 1 + hist.SetDirectory(0) + fi.Close() + return hist, syst + + def draw(self): + for coordinate in self.granularity.sectors.keys(): + plotNumber = 0 + rangeList = self.granularity.sectors[coordinate] + for sectorRange in rangeList: + self.canvas = ROOT.TCanvas("canvas", "canvas", int(ROOT.gStyle.GetCanvasDefW()*len(range(sectorRange[0],sectorRange[1]+1))/10.),ROOT.gStyle.GetCanvasDefH()) + ROOT.gPad.SetRightMargin(0.10) + + legend = ROOT.TLegend(0.2,0.65,0.5,0.85) + legend.SetFillColor(0) + legend.SetFillStyle(0) + legend.SetTextSize(0.04) + legend.SetMargin(0.30) + legend.SetBorderSize(0) + + firstHist = True + histos = [] # need to save histos or they will be deleted right after variable is set to something else + systGraphs = [] # same for systematics errors + for name in self.inFiles.keys(): + if firstHist: + drawMode = "E0" + firstHist = False + else: + drawMode = "E0same" + histo, syst = self.makeHist(name, sectorRange, coordinate, plotNumber) + histo.SetMarkerColor(self.colors[name]) + histo.Draw(drawMode) + histos.append(histo) + legend.AddEntry(histo, self.labels[name], "p") + + if self.hasSystematics[name]: + syst.SetFillColor(self.colors[name]) + syst.SetFillStyle(3354) + syst.Draw("02same") + systGraphs.append(syst) + + legend.Draw() + self.canvas.Update() + + cmsText = ROOT.TLatex(0.16,0.96,self.title) + cmsText.SetTextFont(42) + cmsText.SetNDC() + cmsText.Draw("same") + + import os + if not os.path.isdir(self.outPath): + os.makedirs(self.outPath) + + self.canvas.SaveAs("{}/result_{}_{}.pdf".format(self.outPath, coordinate, self.granularity.names[coordinate][plotNumber])) + self.canvas = None + legend = None + histos = None + plotNumber += 1 + +def main(): + pass + + +if __name__ == "__main__": + main() diff --git a/Alignment/APEEstimation/test/plottingTools/setTDRStyle.py b/Alignment/APEEstimation/test/plottingTools/setTDRStyle.py new file mode 100644 index 0000000000000..055932a68bb21 --- /dev/null +++ b/Alignment/APEEstimation/test/plottingTools/setTDRStyle.py @@ -0,0 +1,170 @@ +from array import * + +def setTDRStyle(): + import ROOT + from ROOT import TStyle + from ROOT import kWhite + from ROOT import kTRUE + + + + + tdrStyle = TStyle("tdrStyle","Style for P-TDR") + + # For the canvas: + tdrStyle.SetCanvasBorderMode(0) + tdrStyle.SetCanvasColor(kWhite) + # For the canvas: + tdrStyle.SetCanvasBorderMode(0) + 
tdrStyle.SetCanvasColor(kWhite) + tdrStyle.SetCanvasDefH(600) #Height of canvas + tdrStyle.SetCanvasDefW(600)#Width of canvas + tdrStyle.SetCanvasDefX(0) #POsition on screen + tdrStyle.SetCanvasDefY(0) + + # For the Pad: + tdrStyle.SetPadBorderMode(0) + # tdrStyle->SetPadBorderSize(Width_t size = 1); + tdrStyle.SetPadColor(kWhite) + tdrStyle.SetPadGridX(0) + tdrStyle.SetPadGridY(0) + tdrStyle.SetGridColor(0) + tdrStyle.SetGridStyle(3) + tdrStyle.SetGridWidth(1) + + # For the frame: + tdrStyle.SetFrameBorderMode(0) + tdrStyle.SetFrameBorderSize(1) + tdrStyle.SetFrameFillColor(0) + tdrStyle.SetFrameFillStyle(0) + tdrStyle.SetFrameLineColor(1) + tdrStyle.SetFrameLineStyle(1) + tdrStyle.SetFrameLineWidth(1) + + # For the histo: + # tdrStyle->SetHistFillColor(1); + # tdrStyle->SetHistFillStyle(0); + tdrStyle.SetHistLineColor(1) + tdrStyle.SetHistLineStyle(0) + tdrStyle.SetHistLineWidth(1) + # tdrStyle->SetLegoInnerR(Float_t rad = 0.5); + + # define the palette for z axis + palette = [] + NRGBs = 5 + NCont = 255 + stops = array("d",[0.00,0.34,0.61,0.84,1.00]) + red = array("d",[0.50,0.50,1.00,1.00,1.00]) + green = array("d",[0.50,1.00,1.00,0.60,0.50]) + blue = array("d",[1.00,1.00,0.50,0.40,0.50]) + FI = ROOT.TColor.CreateGradientColorTable(NRGBs,stops,red,green,blue,NCont) + for i in range(0,NCont): + palette.append(FI+i) + tdrStyle.SetPalette(NCont,array("i",palette)); + tdrStyle.SetNumberContours(NCont); + + tdrStyle.SetEndErrorSize(2) + # tdrStyle->SetErrorMarker(20); + tdrStyle.SetErrorX(0.) + + tdrStyle.SetMarkerStyle(20) + + #For the fit/function: + tdrStyle.SetOptFit(0) + tdrStyle.SetFitFormat("5.4g") + tdrStyle.SetFuncColor(2) + tdrStyle.SetFuncStyle(1) + tdrStyle.SetFuncWidth(1) + + #For the date: + tdrStyle.SetOptDate(0) + # tdrStyle->SetDateX(Float_t x = 0.01); + # tdrStyle->SetDateY(Float_t y = 0.01); + + # For the statistics box: + tdrStyle.SetOptFile(0) + tdrStyle.SetOptStat(0) # To display the mean and RMS: SetOptStat("mr"); + tdrStyle.SetStatColor(kWhite) + tdrStyle.SetStatFont(42) + tdrStyle.SetStatFontSize(0.025) + tdrStyle.SetStatTextColor(1) + tdrStyle.SetStatFormat("6.4g") + tdrStyle.SetStatBorderSize(1) + tdrStyle.SetStatH(0.1) + tdrStyle.SetStatW(0.15) + # tdrStyle->SetStatStyle(Style_t style = 100.1); + # tdrStyle->SetStatX(Float_t x = 0); + # tdrStyle->SetStatY(Float_t y = 0); + + # Margins: + tdrStyle.SetPadTopMargin(0.05) + tdrStyle.SetPadBottomMargin(0.14) + tdrStyle.SetPadLeftMargin(0.16) + tdrStyle.SetPadRightMargin(0.05) + + # For the Global title: + tdrStyle.SetOptTitle(0) + tdrStyle.SetTitleFont(42) + tdrStyle.SetTitleColor(1) + tdrStyle.SetTitleTextColor(1) + tdrStyle.SetTitleFillColor(10) + tdrStyle.SetTitleFontSize(0.05) + # tdrStyle->SetTitleH(0); # Set the height of the title box + # tdrStyle->SetTitleW(0); # Set the width of the title box + # tdrStyle->SetTitleX(0); # Set the position of the title box + # tdrStyle->SetTitleY(0.985); # Set the position of the title box + # tdrStyle->SetTitleStyle(Style_t style = 100.1); + # tdrStyle->SetTitleBorderSize(2); + + # For the axis titles: + tdrStyle.SetTitleColor(1, "XYZ") + tdrStyle.SetTitleFont(42, "XYZ") + tdrStyle.SetTitleSize(0.06, "XYZ") + # tdrStyle->SetTitleXSize(Float_t size = 0.02); # Another way to set the size? 
+ # tdrStyle->SetTitleYSize(Float_t size = 0.02); + tdrStyle.SetTitleXOffset(0.95) + tdrStyle.SetTitleYOffset(1.3) + # tdrStyle->SetTitleOffset(1.1, "Y"); # Another way to set the Offset + + # For the axis labels: + tdrStyle.SetLabelColor(1, "XYZ") + tdrStyle.SetLabelFont(42, "XYZ") + tdrStyle.SetLabelOffset(0.007, "XYZ") + tdrStyle.SetLabelSize(0.05, "XYZ") + + # For the axis: + tdrStyle.SetAxisColor(1, "XYZ") + tdrStyle.SetStripDecimals(kTRUE) + tdrStyle.SetTickLength(0.03, "XYZ") + tdrStyle.SetNdivisions(408, "XYZ") + + #~ tdrStyle->SetNdivisions(510, "XYZ"); + tdrStyle.SetPadTickX(1) # To get tick marks on the opposite side of the frame + tdrStyle.SetPadTickY(1) + + # Change for log plots: + tdrStyle.SetOptLogx(0) + tdrStyle.SetOptLogy(0) + tdrStyle.SetOptLogz(0) + + # Postscript options: + tdrStyle.SetPaperSize(20.,20.); + # tdrStyle->SetLineScalePS(Float_t scale = 3); + # tdrStyle->SetLineStyleString(Int_t i, const char* text); + # tdrStyle->SetHeaderPS(const char* header); + # tdrStyle->SetTitlePS(const char* pstitle); + + #tdrStyle->SetBarOffset(Float_t baroff = 0.5); + #tdrStyle->SetBarWidth(Float_t barwidth = 0.5); + #tdrStyle->SetPaintTextFormat(const char* format = "g"); + #~ tdrStyle.SetPalette(1) + #tdrStyle->SetTimeOffset(Double_t toffset); + #tdrStyle->SetHistMinimumZero(kTRUE); + + + + + ROOT.gROOT.ForceStyle() + + tdrStyle.cd() + return tdrStyle diff --git a/Alignment/APEEstimation/test/plottingTools/systematicErrors.py b/Alignment/APEEstimation/test/plottingTools/systematicErrors.py new file mode 100644 index 0000000000000..d9134f58ec708 --- /dev/null +++ b/Alignment/APEEstimation/test/plottingTools/systematicErrors.py @@ -0,0 +1,116 @@ +import numpy as np +import ROOT + +DIR_BOTH = 0 +DIR_UP = 1 +DIR_DOWN = -1 + +NUM_SECTORS = 68 +NUM_SECTORS_Y = 14 +# systematics has: + # a dict with with coordinate names "X", "Y" as keys + # - each value of these keys is a list/an array of systematic errors for each sector + # - so the list has the length of the number of sectors for that coordinate + # - these errors are quadratically added + # direction which can be DIR_DOWN, DIR_BOTH, or DIR_UP, depending on whether it adds only down, symmetric or up + # isRelative which is a flag telling whether the error is relative to the APE value or absolute + +class SystematicErrors: + def __init__(self): + self.X = np.zeros(NUM_SECTORS) + self.Y = np.zeros(NUM_SECTORS) + # is not made seperately for X and Y. 
If this is wanted, make two separate objects
+        self.isRelative = np.zeros(NUM_SECTORS, dtype=int)
+        self.direction = np.empty(NUM_SECTORS, dtype=int)
+        self.direction.fill(DIR_BOTH) # symmetric errors unless set otherwise
+
+    def __getitem__(self, key):
+        return getattr(self, key)
+
+    def getXFromList(self, X, startat=0):
+        for i,x in enumerate(X):
+            self.X[i+startat] = x
+
+    def getYFromList(self, Y, startat=0):
+        for i,y in enumerate(Y):
+            self.Y[i+startat] = y
+
+    # each line has the structure: xerr yerr isrel direction
+    def write(self, fileName):
+        with open(fileName, "w") as fi:
+            for x, y, rel, direc in zip(self.X, self.Y, self.isRelative, self.direction):
+                fi.write("{} {} {} {}\n".format(x, y, rel, direc))
+
+    def read(self, fileName):
+        with open(fileName, "r") as fi:
+            sector = 0
+            for line in fi:
+                x, y, rel, direc = line.rstrip().split(" ")
+                self.X[sector] = float(x)
+                self.Y[sector] = float(y)
+                self.isRelative[sector] = int(rel)
+                self.direction[sector] = int(direc)
+                sector += 1
+        return self
+
+# difference between ape values in each sector
+# returns a SystematicErrors object with the differences as values
+def apeDifference(minuend, subtrahend):
+    fileA = ROOT.TFile(minuend, "READ")
+    fileB = ROOT.TFile(subtrahend, "READ")
+    apeTreeA_X = fileA.Get("iterTreeX")
+    apeTreeA_X.SetDirectory(0)
+    apeTreeB_X = fileB.Get("iterTreeX")
+    apeTreeB_X.SetDirectory(0)
+    apeTreeA_Y = fileA.Get("iterTreeY")
+    apeTreeA_Y.SetDirectory(0)
+    apeTreeB_Y = fileB.Get("iterTreeY")
+    apeTreeB_Y.SetDirectory(0)
+
+    fileA.Close()
+    fileB.Close()
+
+    # go to the last iteration of each tree
+    apeTreeA_X.GetEntry(apeTreeA_X.GetEntries()-1)
+    apeTreeB_X.GetEntry(apeTreeB_X.GetEntries()-1)
+    apeTreeA_Y.GetEntry(apeTreeA_Y.GetEntries()-1)
+    apeTreeB_Y.GetEntry(apeTreeB_Y.GetEntries()-1)
+
+    difference = SystematicErrors()
+    isRel = 0
+    direc = 0
+
+    for sector in range(1, NUM_SECTORS+1):
+        name = "Ape_Sector_{}".format(sector)
+
+        diffX = abs(getattr(apeTreeA_X, name) - getattr(apeTreeB_X, name))
+        difference.X[sector-1] = diffX
+        if sector <= NUM_SECTORS_Y:
+            diffY = abs(getattr(apeTreeA_Y, name) - getattr(apeTreeB_Y, name))
+            difference.Y[sector-1] = diffY
+        difference.isRelative[sector-1] = isRel
+        difference.direction[sector-1] = direc
+
+    return difference
+
+
+# inFile is allData.root, not allData_iterationApe.root
+# returns two arrays with the number of hits per sector in x and y
+def numberOfHits(inFileName):
+    inFile = ROOT.TFile(inFileName, "READ")
+    num_x = np.zeros(NUM_SECTORS, dtype=int)
+    num_y = np.zeros(NUM_SECTORS, dtype=int)
+    for sector in range(1, NUM_SECTORS+1):
+        xhist = inFile.Get("ApeEstimator1/Sector_{}/Results/h_ResX".format(sector))
+        num_x[sector-1] = xhist.GetEntries()
+        if sector <= NUM_SECTORS_Y:
+            yhist = inFile.Get("ApeEstimator1/Sector_{}/Results/h_ResY".format(sector))
+            num_y[sector-1] = yhist.GetEntries()
+    inFile.Close()
+    return num_x, num_y
+
+def main():
+    pass
+
+if __name__ == "__main__":
+    main()
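
Usage note: a minimal sketch of how the new plotting tools fit together. The result paths below are placeholders (a finished measurement under workingArea/iter14/ is assumed), the 2 micron systematic is purely illustrative, and the internal name and legend label are deliberately kept identical so that the systematics registered in ResultPlotter are found again when drawing.

import ROOT

from granularity import Granularity
from resultPlotter import ResultPlotter
from systematicErrors import SystematicErrors

# custom granularity: plot only the pixel and TIB sectors in x
pixelTIB = Granularity()
pixelTIB.sectors["X"].append((1, 14))   # BPIX/FPIX
pixelTIB.sectors["X"].append((15, 26))  # TIB
pixelTIB.names["X"] = ["PIXEL", "TIB"]

# flat 2 micron absolute, symmetric uncertainty on every sector (illustrative numbers);
# isRelative and direction keep their defaults (absolute, DIR_BOTH)
syst = SystematicErrors()
syst.getXFromList([2.0] * 68)
syst.getYFromList([2.0] * 14)

plot = ResultPlotter()
plot.setOutputPath("workingArea/plots/")
# internal name == legend label here, see note above
plot.addInputFile("data", "workingArea/iter14/allData_iterationApe.root", "data", ROOT.kBlack)
plot.addSystematics("data", syst)
plot.setTitle("")
plot.setGranularity(pixelTIB)
plot.draw()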