Skip to content

Commit

Permalink
Merge pull request #1 from gem-sw/CMSSW_6_2_X_SLHC
Browse files Browse the repository at this point in the history
update
  • Loading branch information
Cesare committed Nov 6, 2013
2 parents ccd01af + c66a3e8 commit 9d779b7
Show file tree
Hide file tree
Showing 82 changed files with 4,929 additions and 745 deletions.
4 changes: 0 additions & 4 deletions Alignment/OfflineValidation/plugins/PrimaryVertexValidation.h
Expand Up @@ -25,10 +25,6 @@
#include "DataFormats/TrackReco/interface/TrackFwd.h"
#include "DataFormats/TrackingRecHit/interface/TrackingRecHit.h"
#include "DataFormats/DetId/interface/DetId.h"
#include "DataFormats/SiStripDetId/interface/TIBDetId.h"
#include "DataFormats/SiStripDetId/interface/TIDDetId.h"
#include "DataFormats/SiStripDetId/interface/TOBDetId.h"
#include "DataFormats/SiStripDetId/interface/TECDetId.h"
#include "DataFormats/SiStripDetId/interface/StripSubdetector.h"
#include "DataFormats/SiPixelDetId/interface/PixelSubdetector.h"
#include "DataFormats/TrackerRecHit2D/interface/SiStripRecHit2DCollection.h"
Expand Down
90 changes: 43 additions & 47 deletions Configuration/PyReleaseValidation/python/MatrixInjector.py
Expand Up @@ -60,7 +60,7 @@ def __init__(self,opt,mode='init',options=''):
self.dqmgui="https://cmsweb.cern.ch/dqm/relval"
#couch stuff
self.couch = 'https://'+self.wmagent+'/couchdb'
# self.couchDB = 'reqmgr_config_cache'
# self.couchDB = 'reqmgr_config_cache'
self.couchCache={} # so that we do not upload like crazy, and recyle cfgs
self.user = os.getenv('USER')
self.group = 'ppd'
Expand All @@ -79,24 +79,22 @@ def __init__(self,opt,mode='init',options=''):
print '\n\tFound wmclient\n'

self.defaultChain={
"RequestType" : "TaskChain", #this is how we handle relvals
"AcquisitionEra": {}, #Acq Era
"ProcessingString": {}, # processing string to label the dataset
"Requestor": self.user, #Person responsible
"Group": self.group, #group for the request
"CMSSWVersion": os.getenv('CMSSW_VERSION'), #CMSSW Version (used for all tasks in chain)
"Campaign": os.getenv('CMSSW_VERSION'), # only for wmstat purpose
"ScramArch": os.getenv('SCRAM_ARCH'), #Scram Arch (used for all tasks in chain)
"ProcessingVersion": self.version, #Processing Version (used for all tasks in chain)
"GlobalTag": None, #Global Tag (overridden per task)
"CouchURL": self.couch, #URL of CouchDB containing Config Cache
"ConfigCacheURL": self.couch, #URL of CouchDB containing Config Cache
"RequestType" : "TaskChain", #this is how we handle relvals
"Requestor": self.user, #Person responsible
"Group": self.group, #group for the request
"CMSSWVersion": os.getenv('CMSSW_VERSION'), #CMSSW Version (used for all tasks in chain)
"Campaign": os.getenv('CMSSW_VERSION'), # only for wmstat purpose
"ScramArch": os.getenv('SCRAM_ARCH'), #Scram Arch (used for all tasks in chain)
"ProcessingVersion": self.version, #Processing Version (used for all tasks in chain)
"GlobalTag": None, #Global Tag (overridden per task)
"CouchURL": self.couch, #URL of CouchDB containing Config Cache
"ConfigCacheURL": self.couch, #URL of CouchDB containing Config Cache
"DbsUrl": "http://cmsdbsprod.cern.ch/cms_dbs_prod_global/servlet/DBSServlet",
#"CouchDBName": self.couchDB, #Name of Couch Database containing config cache
#"CouchDBName": self.couchDB, #Name of Couch Database containing config cache
#- Will contain all configs for all Tasks
"SiteWhitelist" : ["T2_CH_CERN", "T1_US_FNAL"], #Site whitelist
"TaskChain" : None, #Define number of tasks in chain.
"nowmTasklist" : [], #a list of tasks as we put them in
#"SiteWhitelist" : ["T2_CH_CERN", "T1_US_FNAL"], #Site whitelist
"TaskChain" : None, #Define number of tasks in chain.
"nowmTasklist" : [], #a list of tasks as we put them in
"unmergedLFNBase" : "/store/unmerged",
"mergedLFNBase" : "/store/relval",
"dashboardActivity" : "relval",
Expand All @@ -106,41 +104,41 @@ def __init__(self,opt,mode='init',options=''):
}

self.defaultHarvest={
"EnableDQMHarvest" : 1,
"EnableHarvesting" : "True",
"DQMUploadUrl" : self.dqmgui,
"DQMConfigCacheID" : None
}

self.defaultScratch={
"TaskName" : None, #Task Name
"ConfigCacheID" : None, #Generator Config id
"TaskName" : None, #Task Name
"ConfigCacheID" : None, #Generator Config id
"GlobalTag": None,
"SplittingAlgorithm" : "EventBased", #Splitting Algorithm
"SplittingArguments" : {"events_per_job" : None}, #Size of jobs in terms of splitting algorithm
"RequestNumEvents" : None, #Total number of events to generate
"Seeding" : "AutomaticSeeding", #Random seeding method
"PrimaryDataset" : None, #Primary Dataset to be created
"SplittingAlgo" : "EventBased", #Splitting Algorithm
"EventsPerJob" : None, #Size of jobs in terms of splitting algorithm
"RequestNumEvents" : None, #Total number of events to generate
"Seeding" : "AutomaticSeeding", #Random seeding method
"PrimaryDataset" : None, #Primary Dataset to be created
"nowmIO": {},
"KeepOutput" : False
}
self.defaultInput={
"TaskName" : "DigiHLT", #Task Name
"ConfigCacheID" : None, #Processing Config id
"TaskName" : "DigiHLT", #Task Name
"ConfigCacheID" : None, #Processing Config id
"GlobalTag": None,
"InputDataset" : None, #Input Dataset to be processed
"SplittingAlgorithm" : "LumiBased", #Splitting Algorithm
"SplittingArguments" : {"lumis_per_job" : 10}, #Size of jobs in terms of splitting algorithm
"InputDataset" : None, #Input Dataset to be processed
"SplittingAlgo" : "LumiBased", #Splitting Algorithm
"LumisPerJob" : 10, #Size of jobs in terms of splitting algorithm
"nowmIO": {},
"KeepOutput" : False
}
self.defaultTask={
"TaskName" : None, #Task Name
"InputTask" : None, #Input Task Name (Task Name field of a previous Task entry)
"InputFromOutputModule" : None, #OutputModule name in the input task that will provide files to process
"ConfigCacheID" : None, #Processing Config id
"TaskName" : None, #Task Name
"InputTask" : None, #Input Task Name (Task Name field of a previous Task entry)
"InputFromOutputModule" : None, #OutputModule name in the input task that will provide files to process
"ConfigCacheID" : None, #Processing Config id
"GlobalTag": None,
"SplittingAlgorithm" : "LumiBased", #Splitting Algorithm
"SplittingArguments" : {"lumis_per_job" : 10}, #Size of jobs in terms of splitting algorithm
"SplittingAlgo" : "LumiBased", #Splitting Algorithm
"LumisPerJob" : 10, #Size of jobs in terms of splitting algorithm
"nowmIO": {},
"KeepOutput" : False
}
Expand All @@ -155,7 +153,7 @@ def prepare(self,mReader, directories, mode='init'):
wmsplit['DIGIHI']=5
wmsplit['RECOHI']=5
wmsplit['HLTD']=5
wmsplit['RECODreHLT']=2
wmsplit['RECODreHLT']=2
wmsplit['DIGIPU']=4
wmsplit['DIGIPU1']=4
wmsplit['RECOPU1']=1
Expand All @@ -169,7 +167,7 @@ def prepare(self,mReader, directories, mode='init'):
wmsplit['TTbarFS_ID']=1

#import pprint
#pprint.pprint(wmsplit)
#pprint.pprint(wmsplit)
except:
print "Not set up for step splitting"
wmsplit={}
Expand Down Expand Up @@ -214,7 +212,7 @@ def prepare(self,mReader, directories, mode='init'):
arg=s[2][index].split()
ns=map(int,arg[arg.index('--relval')+1].split(','))
chainDict['nowmTasklist'][-1]['RequestNumEvents'] = ns[0]
chainDict['nowmTasklist'][-1]['SplittingArguments']['events_per_job'] = ns[1]
chainDict['nowmTasklist'][-1]['EventsPerJob'] = ns[1]
if 'FASTSIM' in s[2][index] or '--fast' in s[2][index]:
thisLabel+='_FastSim'

Expand All @@ -227,9 +225,9 @@ def prepare(self,mReader, directories, mode='init'):
return -15
chainDict['nowmTasklist'][-1]['InputDataset']=nextHasDSInput.dataSet
splitForThisWf=nextHasDSInput.split
chainDict['nowmTasklist'][-1]['SplittingArguments']['lumis_per_job']=splitForThisWf
chainDict['nowmTasklist'][-1]['LumisPerJob']=splitForThisWf
if step in wmsplit:
chainDict['nowmTasklist'][-1]['SplittingArguments']['lumis_per_job']=wmsplit[step]
chainDict['nowmTasklist'][-1]['LumisPerJob']=wmsplit[step]
# get the run numbers or #events
if len(nextHasDSInput.run):
chainDict['nowmTasklist'][-1]['RunWhitelist']=nextHasDSInput.run
Expand All @@ -252,9 +250,9 @@ def prepare(self,mReader, directories, mode='init'):
print "Failed to find",'%s/%s.io'%(dir,step),".The workflows were probably not run on cfg not created"
return -15
if splitForThisWf:
chainDict['nowmTasklist'][-1]['SplittingArguments']['lumis_per_job']=splitForThisWf
chainDict['nowmTasklist'][-1]['LumisPerJob']=splitForThisWf
if step in wmsplit:
chainDict['nowmTasklist'][-1]['SplittingArguments']['lumis_per_job']=wmsplit[step]
chainDict['nowmTasklist'][-1]['LumisPerJob']=wmsplit[step]

#print step
chainDict['nowmTasklist'][-1]['TaskName']=step
Expand Down Expand Up @@ -314,7 +312,7 @@ def prepare(self,mReader, directories, mode='init'):

## there is in fact only one acquisition era
#if len(set(chainDict['AcquisitionEra'].values()))==1:
# print "setting only one acq"
# print "setting only one acq"
if acqEra:
chainDict['AcquisitionEra'] = chainDict['AcquisitionEra'].values()[0]

Expand All @@ -336,7 +334,7 @@ def prepare(self,mReader, directories, mode='init'):
chainDict['Task%d'%(itask)]=t


##
##


## provide the number of tasks
Expand Down Expand Up @@ -421,5 +419,3 @@ def submit(self):
print "...........",n,"submitted"
random_sleep()



Expand Up @@ -40,10 +40,8 @@
#include "DataFormats/TrackReco/interface/Track.h"
#include "DataFormats/TrackReco/interface/TrackExtra.h"
#include "DataFormats/SiStripDetId/interface/StripSubdetector.h"
#include "DataFormats/SiStripDetId/interface/TECDetId.h"
#include "DataFormats/SiStripDetId/interface/TIBDetId.h"
#include "DataFormats/SiStripDetId/interface/TIDDetId.h"
#include "DataFormats/SiStripDetId/interface/TOBDetId.h"
#include "DataFormats/TrackerCommon/interface/TrackerTopology.h"
#include "Geometry/Records/interface/IdealGeometryRecord.h"
#include "DataFormats/SiStripCluster/interface/SiStripCluster.h"
#include "DataFormats/SiStripCluster/interface/SiStripClusterCollection.h"
#include "DataFormats/TrackerRecHit2D/interface/SiPixelRecHit.h"
Expand Down Expand Up @@ -116,51 +114,51 @@ SiStripFineDelayHit::~SiStripFineDelayHit()
//
// member functions
//
/// Build a (mask, root) DetId pair selecting one tracker substructure.
///
/// The returned pair is (maskDetId, rootDetId): a raw DetId whose set bits
/// form a mask over the substructure fields, and the raw DetId value that a
/// detector id must match (after masking) to belong to the requested layer,
/// ring, wheel or rod.
///
/// @param subdet        strip subdetector (TIB/TID/TOB/TEC)
/// @param substructure  layer/wheel index; for TID/TEC its sign selects the
///                      side (positive -> side 2, negative -> side 1) and its
///                      absolute value the wheel number
/// @param tTopo         tracker topology used to encode the DetIds
/// @return std::pair<maskDetId, rootDetId>
///
/// NOTE(review): an unlisted subdet value falls through the switch and yields
/// the pair (0, 0), i.e. a mask that matches every DetId — confirm this
/// "no filtering" behavior is intended for callers.
std::pair<uint32_t, uint32_t> SiStripFineDelayHit::deviceMask(const StripSubdetector::SubDetector subdet,const int substructure, const TrackerTopology *tTopo)
{
 uint32_t rootDetId = 0;
 uint32_t maskDetId = 0;
 switch(subdet){
   case (int)StripSubdetector::TIB :
     {
       // encode only the layer field; 15 (all bits of the field) is the mask
       rootDetId = tTopo->tibDetId(substructure,0,0,0,0,0).rawId();
       maskDetId = tTopo->tibDetId(15,0,0,0,0,0).rawId();
       break;
     }
   case (int)StripSubdetector::TID :
     {
       // side is derived from the sign of substructure, wheel from |substructure|
       rootDetId = tTopo->tidDetId(substructure>0 ? 2 : 1,abs(substructure),0,0,0,0).rawId();
       maskDetId = tTopo->tidDetId(3,15,0,0,0,0).rawId();
       break;
     }
   case (int)StripSubdetector::TOB :
     {
       rootDetId = tTopo->tobDetId(substructure,0,0,0,0).rawId();
       maskDetId = tTopo->tobDetId(15,0,0,0,0).rawId();
       break;
     }
   case (int)StripSubdetector::TEC :
     {
       // side is derived from the sign of substructure, wheel from |substructure|
       rootDetId = tTopo->tecDetId(substructure>0 ? 2 : 1,abs(substructure),0,0,0,0,0).rawId();
       maskDetId = tTopo->tecDetId(3,15,0,0,0,0,0).rawId();
       break;
     }
 }
 return std::make_pair(maskDetId,rootDetId);
}

/// Find the DetIds (plus the track's local angle information) crossed by a
/// track within one tracker substructure.
///
/// Convenience overload: translates (subdet, substructure) into a
/// (mask, root) DetId pair via deviceMask() and delegates to the
/// recHit-looping overload.
///
/// @param tracker       tracker geometry
/// @param tk            track under study
/// @param trajVec       trajectories associated to the track
/// @param tTopo         tracker topology used to encode the DetId mask
/// @param subdet        strip subdetector (TIB/TID/TOB/TEC)
/// @param substructure  layer/wheel selector; the sentinel 0xff means
///                      "no selection" and disables the DetId cut entirely
/// @return vector of (detId, (angle information)) pairs — see the
///         mask/root overload for the exact payload semantics
std::vector< std::pair<uint32_t,std::pair<double, double> > > SiStripFineDelayHit::detId(const TrackerGeometry& tracker,const reco::Track* tk, const std::vector<Trajectory>& trajVec, const TrackerTopology *tTopo, const StripSubdetector::SubDetector subdet,const int substructure)
{
  // 0xff is the "any device" sentinel: mask and root of 0 match every DetId
  if(substructure==0xff) return detId(tracker,tk,trajVec,0,0,tTopo);
  // first determine the root detId we are looking for
  std::pair<uint32_t, uint32_t> mask = deviceMask(subdet,substructure,tTopo);
  // then call the method that loops on recHits
  return detId(tracker,tk,trajVec,mask.first,mask.second,tTopo);
}

std::vector< std::pair<uint32_t,std::pair<double, double> > > SiStripFineDelayHit::detId(const TrackerGeometry& tracker,const reco::Track* tk, const std::vector<Trajectory>& trajVec, const uint32_t& maskDetId, const uint32_t& rootDetId)
std::vector< std::pair<uint32_t,std::pair<double, double> > > SiStripFineDelayHit::detId(const TrackerGeometry& tracker,const reco::Track* tk, const std::vector<Trajectory>& trajVec, const uint32_t& maskDetId, const uint32_t& rootDetId, const TrackerTopology *tTopo)
{
bool onDisk = ((maskDetId==TIDDetId(3,15,0,0,0,0).rawId())||(maskDetId==TECDetId(3,15,0,0,0,0,0).rawId())) ;
bool onDisk = ((maskDetId==tTopo->tidDetId(3,15,0,0,0,0).rawId())||(maskDetId==tTopo->tecDetId(3,15,0,0,0,0,0).rawId())) ;
std::vector< std::pair<uint32_t,std::pair<double, double> > > result;
std::vector<uint32_t> usedDetids;
// now loop on recHits to find the right detId plus the track local angle
Expand Down Expand Up @@ -349,6 +347,11 @@ void
SiStripFineDelayHit::produce(edm::Event& iEvent, const edm::EventSetup& iSetup)
{
using namespace edm;
//Retrieve tracker topology from geometry
edm::ESHandle<TrackerTopology> tTopoHandle;
iSetup.get<IdealGeometryRecord>().get(tTopoHandle);
const TrackerTopology* const tTopo = tTopoHandle.product();

// Retrieve commissioning information from "event summary"
edm::Handle<SiStripEventSummary> runsummary;
iEvent.getByLabel( inputModuleLabel_, runsummary );
Expand Down Expand Up @@ -409,10 +412,10 @@ SiStripFineDelayHit::produce(edm::Event& iEvent, const edm::EventSetup& iSetup)
else if(((layerCode>>6)&0x3)==2) subdet = StripSubdetector::TID;
else if(((layerCode>>6)&0x3)==3) subdet = StripSubdetector::TEC;
int32_t layerIdx = (layerCode&0xF)*(((layerCode>>4)&0x3) ? -1 : 1);
intersections = detId(*tracker,&(*itrack),trajVec,subdet,layerIdx);
intersections = detId(*tracker,&(*itrack),trajVec,tTopo,subdet,layerIdx);
} else {
// for latency scans, no layer is specified -> no cut on detid
intersections = detId(*tracker,&(*itrack),trajVec);
intersections = detId(*tracker,&(*itrack),trajVec,tTopo);
}
LogDebug("produce") << " Found " << intersections.size() << " interesting intersections." << std::endl;
for(std::vector< std::pair<uint32_t,std::pair<double,double> > >::iterator it = intersections.begin();it<intersections.end();it++) {
Expand Down Expand Up @@ -480,6 +483,11 @@ SiStripFineDelayHit::produce(edm::Event& iEvent, const edm::EventSetup& iSetup)
void
SiStripFineDelayHit::produceNoTracking(edm::Event& iEvent, const edm::EventSetup& iSetup)
{
//Retrieve tracker topology from geometry
edm::ESHandle<TrackerTopology> tTopoHandle;
iSetup.get<IdealGeometryRecord>().get(tTopoHandle);
const TrackerTopology* const tTopo = tTopoHandle.product();

event_ = &iEvent;
// container for the selected hits
std::vector< edm::DetSet<SiStripRawDigi> > output;
Expand All @@ -494,7 +502,7 @@ SiStripFineDelayHit::produceNoTracking(edm::Event& iEvent, const edm::EventSetup
else if(((layerCode>>6)&0x3)==2) subdet = StripSubdetector::TID;
else if(((layerCode>>6)&0x3)==3) subdet = StripSubdetector::TEC;
int32_t layerIdx = (layerCode&0xF)*(((layerCode>>4)&0x3) ? -1 : 1);
std::pair<uint32_t, uint32_t> mask = deviceMask(subdet,layerIdx);
std::pair<uint32_t, uint32_t> mask = deviceMask(subdet,layerIdx,tTopo);
// look at the clusters
edm::Handle<edmNew::DetSetVector<SiStripCluster> > clusters;
iEvent.getByLabel(clusterLabel_,clusters);
Expand Down

0 comments on commit 9d779b7

Please sign in to comment.