Skip to content

Commit

Permalink
Merge pull request #13516 from hengne/from-CMSSW_8_0_0_patch1_relval_feb28_rebase
Browse files Browse the repository at this point in the history

relval matrix updates
  • Loading branch information
cmsbuild committed Mar 4, 2016
2 parents ef8ce2c + 1274242 commit 7e4152f
Show file tree
Hide file tree
Showing 12 changed files with 109 additions and 201 deletions.
2 changes: 1 addition & 1 deletion Configuration/AlCa/python/autoCond.py
Expand Up @@ -34,7 +34,7 @@
# GlobalTag for Run2 HLT for HI: it points to the online GT
'run2_hlt_hi' : '80X_dataRun2_HLTHI_frozen_v3',
# GlobalTag for MC production with perfectly aligned and calibrated detector for Phase1 2017
'phase1_2017_design' : '80X_upgrade2017_design_v3',
'phase1_2017_design' : '80X_upgrade2017_design_v4',
# GlobalTag for MC production with perfectly aligned and calibrated detector for Phase1 2019
'phase1_2019_design' : 'DES19_70_V2', # placeholder (GT not meant for standard RelVal)
# GlobalTag for MC production with perfectly aligned and calibrated detector for Phase2
Expand Down
19 changes: 10 additions & 9 deletions Configuration/PyReleaseValidation/python/MatrixInjector.py
Expand Up @@ -104,7 +104,7 @@ def __init__(self,opt,mode='init',options=''):
"unmergedLFNBase" : "/store/unmerged",
"mergedLFNBase" : "/store/relval",
"dashboardActivity" : "relval",
"Multicore" : opt.nThreads,
"Multicore" : 1, # do not set multicore for the whole chain
"Memory" : 3000,
"SizePerEvent" : 1234,
"TimePerEvent" : 0.1
Expand All @@ -113,7 +113,8 @@ def __init__(self,opt,mode='init',options=''):
self.defaultHarvest={
"EnableHarvesting" : "True",
"DQMUploadUrl" : self.dqmgui,
"DQMConfigCacheID" : None
"DQMConfigCacheID" : None,
"Multicore" : 1 # hardcode Multicore to be 1 for Harvest
}

self.defaultScratch={
Expand All @@ -126,6 +127,7 @@ def __init__(self,opt,mode='init',options=''):
"Seeding" : "AutomaticSeeding", #Random seeding method
"PrimaryDataset" : None, #Primary Dataset to be created
"nowmIO": {},
"Multicore" : opt.nThreads, # this is the per-taskchain Multicore; it's the default assigned to a task if it has no value specified
"KeepOutput" : False
}
self.defaultInput={
Expand All @@ -136,6 +138,7 @@ def __init__(self,opt,mode='init',options=''):
"SplittingAlgo" : "LumiBased", #Splitting Algorithm
"LumisPerJob" : 10, #Size of jobs in terms of splitting algorithm
"nowmIO": {},
"Multicore" : opt.nThreads, # this is the per-taskchain Multicore; it's the default assigned to a task if it has no value specified
"KeepOutput" : False
}
self.defaultTask={
Expand All @@ -147,6 +150,7 @@ def __init__(self,opt,mode='init',options=''):
"SplittingAlgo" : "LumiBased", #Splitting Algorithm
"LumisPerJob" : 10, #Size of jobs in terms of splitting algorithm
"nowmIO": {},
"Multicore" : opt.nThreads, # this is the per-taskchain Multicore; it's the default assigned to a task if it has no value specified
"KeepOutput" : False
}

Expand Down Expand Up @@ -188,6 +192,7 @@ def prepare(self,mReader, directories, mode='init'):
wmsplit['RECOUP15']=5
wmsplit['RECOAODUP15']=5
wmsplit['DBLMINIAODMCUP15NODQM']=5


#import pprint
#pprint.pprint(wmsplit)
Expand Down Expand Up @@ -326,15 +331,11 @@ def prepare(self,mReader, directories, mode='init'):
if ('DBLMINIAODMCUP15NODQM' in step):
chainDict['nowmTasklist'][-1]['ProcessingString']=chainDict['nowmTasklist'][-1]['ProcessingString']+'_miniAOD'

# modify memory settings for Multicore processing
# implemented with reference to franzoni:multithread-CMSSW_7_4_3
if(chainDict['Multicore']>1):
if( chainDict['nowmTasklist'][-1]['Multicore'] ):
# the scaling factor of 1.2GB / thread is empirical and measured on a SECOND round of tests with PU samples
# the number of threads is assumed to be the same for all tasks
# the number of threads is NO LONGER assumed to be the same for all tasks
# https://hypernews.cern.ch/HyperNews/CMS/get/edmFramework/3509/1/1/1.html
chainDict['nowmTasklist'][-1]['Memory']= 3000 + int( chainDict['Multicore'] -1)*1200
# set also the overall memory to the same value; the agreement (in the phasing in) is that
chainDict['Memory'] = 3000 + int( chainDict['Multicore'] -1)*1200
chainDict['nowmTasklist'][-1]['Memory'] = 3000 + int( chainDict['nowmTasklist'][-1]['Multicore'] -1 )*1200

index+=1
#end of loop through steps
Expand Down
2 changes: 1 addition & 1 deletion Configuration/PyReleaseValidation/python/MatrixUtil.py
Expand Up @@ -47,7 +47,7 @@ def expandLsInterval(lumis):
return range(lumis[0],(lumis[1]+1))

from DPGAnalysis.Skims.golden_json_2015 import *
jsonFile2015 = findFileInPath("DPGAnalysis/Skims/data/Cert_246908-XXXXXX_13TeV_PromptReco_Collisions15_JSON.txt")
jsonFile2015 = findFileInPath("DPGAnalysis/Skims/data/Cert_13TeV_16Dec2015ReReco_Collisions15_25ns_JSON.txt")

import json
with open(jsonFile2015) as data_file:
Expand Down
2 changes: 1 addition & 1 deletion Configuration/PyReleaseValidation/python/WorkFlowRunner.py
Expand Up @@ -229,7 +229,7 @@ def closeCmd(i,ID):
cmd+=' --fileout file:step%s.root '%(istep,)
if self.jobReport:
cmd += ' --suffix "-j JobReport%s.xml " ' % istep
if self.nThreads > 1:
if (self.nThreads > 1) and ('HARVESTING' not in cmd) :
cmd += ' --nThreads %s' % self.nThreads
cmd+=closeCmd(istep,self.wf.nameId)

Expand Down
3 changes: 2 additions & 1 deletion Configuration/PyReleaseValidation/python/relval_2017.py
Expand Up @@ -15,7 +15,8 @@

numWFStart=10000
numWFSkip=200
numWFIB = [10060.0,10039.0,10046.0] ##2017 WFs to run in IB (TenMuE_0_200, TTbar, MinBias)
#2017 WFs to run in IB (TenMuE_0_200, TTbar, ZEE, MinBias, TTbar PU, ZEE PU)
numWFIB = [10021.0,10024.0,10025.0,10026.0,10023.0,10224.0,10225.0]
for i,key in enumerate(upgradeKeys):
numWF=numWFStart+i*numWFSkip
for frag in upgradeFragments:
Expand Down
7 changes: 7 additions & 0 deletions Configuration/PyReleaseValidation/python/relval_highstats.py
Expand Up @@ -82,3 +82,10 @@
workflows[13992502]=['',['TTbar_13_HS','DIGIUP15_PU25HS','RECOUP15_PU25HS','HARVESTUP15_PU25']]


## 2015HighLumi run High stats
workflows[134.99601] = ['',['RunJetHT2015HLHS','HLTDR2_25ns','RECODR2_25nsreHLT','HARVESTDR2_25nsreHLT']]
workflows[134.99602] = ['',['RunZeroBias2015HLHS','HLTDR2_25ns','RECODR2_25nsreHLT','HARVESTDR2_25nsreHLT']]
workflows[134.99603] = ['',['RunSingleMu2015HLHS','HLTDR2_25ns','RECODR2_25nsreHLT','HARVESTDR2_25nsreHLT']]



3 changes: 3 additions & 0 deletions Configuration/PyReleaseValidation/python/relval_standard.py
Expand Up @@ -285,6 +285,9 @@
workflows[1354] = ['', ['WpToENu_M-2000_13','DIGIUP15','RECOUP15','HARVESTUP15']]
workflows[1355] = ['', ['DisplacedSUSY_stopToBottom_M_300_1000mm_13','DIGIUP15','RECOUP15','HARVESTUP15']]

# fullSim 13TeV normal workflows wrapped from extended generator
workflows[1360] = ['', ['TTbar012Jets_NLO_Mad_py8_Evt_13','GENSIM_TuneCUETP8M1_13TeV_aMCatNLO_FXFX_5f_max2j_max1p_LHE_py8_Evt','DIGIUP15','RECOUP15','HARVESTUP15']]


### HI test ###

Expand Down
53 changes: 40 additions & 13 deletions Configuration/PyReleaseValidation/python/relval_steps.py
Expand Up @@ -186,6 +186,13 @@
steps['RunHLTPhy2015DHS']={'INPUT':InputInfo(dataSet='/HLTPhysics/Run2015D-v1/RAW',label='hltPhy2015DHS',events=100000,location='STD', ls=Run2015DHS)}


#### run2 2015 HighLumi High Stat workflows ##
# Run2015HLHS, 25ns, run 260627, JetHT: 2.9M, SingleMuon: 5.7M, ZeroBias: 1.6M
Run2015HLHS=selectedLS([260627])
steps['RunJetHT2015HLHS']={'INPUT':InputInfo(dataSet='/JetHT/Run2015D-v1/RAW',label='jetHT2015HLHT',events=100000,location='STD', ls=Run2015HLHS)}
steps['RunZeroBias2015HLHS']={'INPUT':InputInfo(dataSet='/ZeroBias/Run2015D-v1/RAW',label='zb2015HLHT',events=100000,location='STD', ls=Run2015HLHS)}
steps['RunSingleMu2015HLHS']={'INPUT':InputInfo(dataSet='/SingleMuon/Run2015D-v1/RAW',label='sigMu2015HLHT',events=100000,location='STD', ls=Run2015HLHS)}

def gen(fragment,howMuch):
global step1Defaults
return merge([{'cfg':fragment},howMuch,step1Defaults])
Expand Down Expand Up @@ -281,10 +288,10 @@ def identitySim(wf):
# THIS ABOVE IS NOT USED, AT THE MOMENT
'CMSSW_7_6_0_pre7-76X_mcRun2_asymptotic_v9_realBS-v1', # 3 - 13 TeV samples with GEN-SIM from 750_p4; also GEN-SIM-DIGI-RAW-HLTDEBUG for id tests
'CMSSW_7_3_0_pre1-PRE_LS172_V15_FastSim-v1', # 4 - fast sim GEN-SIM-DIGI-RAW-HLTDEBUG for id tests
'CMSSW_8_0_0_pre2-PU25ns_76X_mcRun2_asymptotic_v12-v1', # 5 - fullSim PU 25ns premix
'CMSSW_8_0_0_pre2-PU50ns_76X_mcRun2_startup_v11-v1', # 6 - fullSim PU 50ns premix
'CMSSW_8_0_0_pre2-76X_mcRun2_asymptotic_v12_FastSim-v1', # 7 - fastSim MinBias for mixing
'CMSSW_8_0_0_pre2-PU25ns_76X_mcRun2_asymptotic_v12_FastSim-v1', # 8 - fastSim premixed MinBias
'CMSSW_8_0_0-PU25ns_80X_mcRun2_asymptotic_v4-v1', # 5 - fullSim PU 25ns premix for 800pre6
'CMSSW_8_0_0-PU50ns_80X_mcRun2_startup_v4-v1', # 6 - fullSim PU 50ns premix for 800pre6
'CMSSW_8_0_0-80X_mcRun2_asymptotic_v4_FastSim-v1', # 7 - fastSim MinBias for mixing for 800pre6
'CMSSW_8_0_0-PU25ns_80X_mcRun2_asymptotic_v4_FastSim-v2', # 8 - fastSim premixed MinBias for 800pre6
'CMSSW_7_6_0_pre6-76X_mcRun2_HeavyIon_v4-v1', # 9 - Run2 HI GEN-SIM
'CMSSW_7_6_0-76X_mcRun2_asymptotic_v11-v1', # 10 - 13 TeV High Stats GEN-SIM
'CMSSW_7_6_0_pre7-76X_mcRun2_asymptotic_v9_realBS-v1', # 11 - 13 TeV High Stats MiniBias for mixing GEN-SIM
Expand Down Expand Up @@ -677,6 +684,21 @@ def identityFS(wf):
},
step1Defaults])

# transfer extendedgen step1 LHE to be used in a normal workflow
step1LHENormal = {'--relval' : '9000,50',
'--conditions' : 'auto:run2_mc',
'--beamspot' : 'Realistic50ns13TeVCollision',
}

# transfer extendedgen step1 GEN to GEN-SIM to be used in a normal workflow
step1GENNormal = {'--relval' : '9000,50',
'-s' : 'GEN,SIM',
'--conditions' : 'auto:run2_mc',
'--beamspot' : 'Realistic50ns13TeVCollision',
'--eventcontent': 'FEVTDEBUG',
'--datatier' : 'GEN-SIM',
'--era' : 'Run2_25ns',
}

steps['DYToll01234Jets_5f_LO_MLM_Madgraph_LHE_13TeV']=genvalid('Configuration/Generator/python/DYToll01234Jets_5f_LO_MLM_Madgraph_LHE_13TeV_cff.py',step1LHEDefaults)
steps['TTbar012Jets_5f_NLO_FXFX_Madgraph_LHE_13TeV']=genvalid('Configuration/Generator/python/TTbar012Jets_5f_NLO_FXFX_Madgraph_LHE_13TeV_cff.py',step1LHEDefaults)
Expand Down Expand Up @@ -755,6 +777,14 @@ def identityFS(wf):
steps['Hadronizer_TuneCUETP8M1_13TeV_MLM_5f_max4j_LHE_pythia8_Tauola_taurhonu']=genvalid('Hadronizer_TuneCUETP8M1_13TeV_MLM_5f_max4j_LHE_pythia8_Tauola_taurhonu_cff',step1HadronizerDefaults)
steps['GGToHtaurhonu_13TeV_pythia8-tauola']=genvalid('GGToHtautau_13TeV_pythia8_Tauola_taurhonu_cff',step1GenDefaults)

# normal fullSim workflows wrapping ext-gen workflows
# LHE step
steps['TTbar012Jets_NLO_Mad_py8_Evt_13']=merge([{'--relval':'29000,100'},step1LHENormal,steps['TTbar012Jets_5f_NLO_FXFX_Madgraph_LHE_13TeV']])

# GEN-SIM step
steps['GENSIM_TuneCUETP8M1_13TeV_aMCatNLO_FXFX_5f_max2j_max1p_LHE_py8_Evt'] = merge([step1GENNormal,steps['Hadronizer_TuneCUETP8M1_13TeV_aMCatNLO_FXFX_5f_max2j_max1p_LHE_pythia8_evtgen']])


#Sherpa
steps['sherpa_ZtoEE_0j_BlackHat_13TeV_MASTER']=genvalid('sherpa_ZtoEE_0j_BlackHat_13TeV_MASTER_cff',step1GenDefaults)
steps['sherpa_ZtoEE_0j_OpenLoops_13TeV_MASTER']=genvalid('sherpa_ZtoEE_0j_OpenLoops_13TeV_MASTER_cff',step1GenDefaults)
Expand Down Expand Up @@ -820,7 +850,7 @@ def identityFS(wf):
'--datatier' :'GEN-SIM-DIGI-RAW-HLTDEBUG',
'--eventcontent':'FEVTDEBUGHLT',
'--era' :'Run2_25ns',
'-n' :'10'
'-n' :'10',
}
step2Upg2015Defaults50ns = merge([{'-s':'DIGI:pdigi_valid,L1,DIGI2RAW,HLT:@relval50ns','--conditions':'auto:run2_mc_50ns','--era':'Run2_50ns'},step2Upg2015Defaults])

Expand Down Expand Up @@ -1008,7 +1038,7 @@ def identityFS(wf):
'--conditions' : 'auto:run1_mc',
'--no_exec' : '',
'--datatier' : 'GEN-SIM-RECO,DQMIO',
'--eventcontent': 'RECOSIM,DQM'
'--eventcontent': 'RECOSIM,DQM',
}
step3DefaultsAlCaCalo=merge([{'-s':'RAW2DIGI,L1Reco,RECO,EI,ALCA:EcalCalZElectron+EcalCalWElectron+EcalUncalZElectron+EcalUncalWElectron+HcalCalIsoTrk,VALIDATION,DQM'}, step3Defaults])

Expand Down Expand Up @@ -1142,7 +1172,7 @@ def identityFS(wf):
'--conditions':'auto:run1_data',
'--datatier':'ALCARECO',
'--eventcontent':'ALCARECO'}
steps['ALCAEXP']={'-s':'ALCA:PromptCalibProd+PromptCalibProdSiStrip+PromptCalibProdSiStripGains+PromptCalibProdSiPixelAli',
steps['ALCAEXP']={'-s':'ALCA:SiStripCalZeroBias+TkAlMinBias+DtCalib+Hotline+LumiPixelsMinBias+PromptCalibProd+PromptCalibProdSiStrip+PromptCalibProdSiStripGains+PromptCalibProdSiPixelAli',
'--conditions':'auto:run1_data',
'--datatier':'ALCARECO',
'--eventcontent':'ALCARECO'}
Expand Down Expand Up @@ -1435,10 +1465,7 @@ def identityFS(wf):
from Configuration.PyReleaseValidation.upgradeWorkflowComponents import *

defaultDataSets={}
defaultDataSets['Extended2023HGCalMuon']='CMSSW_6_2_0_SLHC20_patch1-DES23_62_V1_refHGCALV5-v'
defaultDataSets['Extended2023SHCalNoTaper']='CMSSW_6_2_0_SLHC20_patch1-DES23_62_V1_refSHNoTaper-v'
defaultDataSets['2019WithGEMAging']='CMSSW_6_2_0_SLHC20-DES19_62_V8_UPG2019withGEM-v'
defaultDataSets['2017']='CMSSW_8_0_0_pre2-76X_upgrade2017_design_v7_UPG17-v'
defaultDataSets['2017']='CMSSW_8_0_0_patch1-80X_upgrade2017_design_v4_UPG17-v'
keys=defaultDataSets.keys()
for key in keys:
defaultDataSets[key+'PU']=defaultDataSets[key]
Expand All @@ -1459,7 +1486,7 @@ def identityFS(wf):
for ds in defaultDataSets:
key='MinBias_TuneZ2star_14TeV_pythia6'+'_'+ds
name=baseDataSetReleaseBetter[key]
PUDataSets[ds]={'-n':10,'--pileup':'AVE_35_BX_25ns','--pileup_input':'das:/RelValMinBias_TuneZ2star_14TeV/%s/GEN-SIM'%(name,)}
PUDataSets[ds]={'-n':10,'--pileup':'AVE_35_BX_25ns','--pileup_input':'das:/RelValMinBias_13/%s/GEN-SIM'%(name,)}


upgradeStepDict={}
Expand All @@ -1479,7 +1506,7 @@ def identityFS(wf):
upgradeStepDict['GenSimFull'][k]= {'-s' : 'GEN,SIM',
'-n' : 10,
'--conditions' : gt,
'--beamspot' : 'Gauss',
'--beamspot' : 'Realistic50ns13TeVCollision',
'--datatier' : 'GEN-SIM',
'--eventcontent': 'FEVTDEBUG',
'--geometry' : geom
Expand Down

0 comments on commit 7e4152f

Please sign in to comment.