Skip to content

Commit

Permalink
Merge pull request #19540 from hroskes/tkalvalidation-dasgoclient
Browse files Browse the repository at this point in the history
Tracker alignment validation das client
  • Loading branch information
cmsbuild committed Jul 6, 2017
2 parents f160607 + 4018e72 commit fb5a0fb
Show file tree
Hide file tree
Showing 2 changed files with 63 additions and 29 deletions.
42 changes: 38 additions & 4 deletions Alignment/OfflineValidation/python/TkAlAllInOneTool/dataset.py
Expand Up @@ -14,6 +14,23 @@
from helperFunctions import cache
from TkAlExceptions import AllInOneError

#####################################################################
#can remove this section and others below once dasgoclient is updated
#(search for olddas)
from helperFunctions import getCommandOutput2

# Detect whether the installed dasgoclient is older than v1.0.5.
# olddas is True when the queries below must fall back to the old
# das_client behavior (see the "can remove" sections later in this file).
try:
    dasgoversion = getCommandOutput2("dasgoclient --version")
except RuntimeError:
    # could be no proxy inited, but this will pop up later in any case
    olddas = True
else:
    # Expects the second whitespace token to contain "git=v<major>.<minor>.<patch>"
    # (e.g. "... git=v1.0.5") -- TODO confirm against dasgoclient --version output.
    dasgoversion = dasgoversion.split()[1].split("git=v")[1]
    dasgoversion = tuple(int(part) for part in dasgoversion.split("."))
    # Tuple comparison is lexicographic, so (0, 9, 9) < (1, 0, 5) etc.
    olddas = dasgoversion < (1, 0, 5)
#####################################################################

class Dataset(object):
def __init__( self, datasetName, dasLimit = 0, tryPredefinedFirst = True,
Expand Down Expand Up @@ -352,7 +369,12 @@ def forcerunrangefunction(s):
return forcerunrangefunction

def __getData( self, dasQuery, dasLimit = 0 ):
dasData = das_client.get_data(dasQuery, dasLimit)
dasData = das_client.get_data(dasQuery, dasLimit,
############################################
#can remove this once dasgoclient is updated
cmd="das_client" if olddas else None
############################################
)
if isinstance(dasData, str):
jsondict = json.loads( dasData )
else:
Expand Down Expand Up @@ -395,8 +417,12 @@ def __getDataType( self ):
return datatype
return "unknown"

dasQuery_type = ( 'dataset dataset=%s instance=%s | grep dataset.datatype,'
dasQuery_type = ( 'dataset dataset=%s instance=%s detail=true | grep dataset.datatype,'
'dataset.name'%( self.__name, self.__dasinstance ) )
#####################################################################
#can remove this once dasgoclient is updated
if olddas: dasQuery_type = dasQuery_type.replace("detail=true", "")
#####################################################################
data = self.__getData( dasQuery_type )

try:
Expand Down Expand Up @@ -524,7 +550,11 @@ def __getMagneticFieldForRun( self, run = -1, tolerance = 0.5 ):
return float(line.replace("#magnetic field: ", "").split(",")[1].split("#")[0].strip())

if run > 0:
dasQuery = ('run=%s instance=%s'%(run, self.__dasinstance)) #for data
dasQuery = ('run=%s instance=%s detail=true'%(run, self.__dasinstance)) #for data
#####################################################################
#can remove this once dasgoclient is updated
if olddas: dasQuery = dasQuery.replace("detail=true", "")
#####################################################################
data = self.__getData(dasQuery)
try:
return self.__findInJson(data, ["run","bfield"])
Expand Down Expand Up @@ -578,9 +608,13 @@ def __getFileInfoList( self, dasLimit, parent = False ):
searchdataset = self.parentDataset()
else:
searchdataset = self.__name
dasQuery_files = ( 'file dataset=%s instance=%s | grep file.name, file.nevents, '
dasQuery_files = ( 'file dataset=%s instance=%s detail=true | grep file.name, file.nevents, '
'file.creation_time, '
'file.modification_time'%( searchdataset, self.__dasinstance ) )
#####################################################################
#can remove this once dasgoclient is updated
if olddas: dasQuery_files = dasQuery_files.replace("detail=true", "")
#####################################################################
print "Requesting file information for '%s' from DAS..."%( searchdataset ),
sys.stdout.flush()
data = self.__getData( dasQuery_files, dasLimit )
Expand Down
50 changes: 25 additions & 25 deletions Alignment/OfflineValidation/test/testValidate.ini
Expand Up @@ -13,51 +13,51 @@ eosdir = Test
###############################################################################
# configuration of several alignments

[alignment:2016G_prompt]
title = 2016G (prompt)
globaltag = 80X_dataRun2_Prompt_v10
[alignment:prompt]
title = prompt ;unnecessary here, since it defaults to the alignment name, but can contain spaces, TLatex, etc.
globaltag = 92X_dataRun2_Prompt_v2
color = 1
#first digits = marker style for track splitting and Z->mumu. Last 2 digits = line style.
#if you use a <=2 digit number, it will be the line style
#track splitting histograms will use markers if you give a >=3 digit number here, otherwise they will be shown as histograms (i.e. lines)
style = 2001

[alignment:2016G_express]
title = 2016G (express)
globaltag = 80X_dataRun2_Express_v12
[alignment:express]
title = express
globaltag = 92X_dataRun2_Express_v2
color = 2
#first 2 digits = marker style for track splitting and Z->mumu. Last 2 digits = line style.
style = 2402

###############################################################################
# configuration of individual validations

[offline:validation_IsoMu]
[offline:validation_MinBias]
maxevents = 1000
dataset = /SingleMuon/Run2016G-TkAlMuonIsolated-PromptReco-v1/ALCARECO
trackcollection = ALCARECOTkAlMuonIsolated
dataset = /MinimumBias/Run2017A-TkAlMinBias-PromptReco-v1/ALCARECO
trackcollection = ALCARECOTkAlMinBias

[offline:validation_cosmics]
maxevents = 1000
dataset = /NoBPTX/Run2016G-TkAlCosmicsInCollisions-PromptReco-v1/ALCARECO
trackcollection = ALCARECOTkAlCosmicsInCollisions
dataset = /Cosmics/Run2017A-TkAlCosmics0T-PromptReco-v1/ALCARECO
trackcollection = ALCARECOTkAlCosmicsCTF0T

[compare:Tracker]
levels = "Tracker","DetUnit"
dbOutput = false

[zmumu:some_zmumu_validation]
maxevents = 1000
dataset = /DoubleMuon/Run2016G-TkAlZMuMu-PromptReco-v1/ALCARECO
dataset = /DoubleMuon/Run2017A-TkAlZMuMu-PromptReco-v3/ALCARECO
etamaxneg = 2.4
etaminneg = -2.4
etamaxpos = 2.4
etaminpos = -2.4

[split:some_split_validation]
maxevents = 1000
dataset = /NoBPTX/Run2016G-TkAlCosmicsInCollisions-PromptReco-v1/ALCARECO
trackcollection = ALCARECOTkAlCosmicsInCollisions
dataset = /Cosmics/Run2017A-TkAlCosmics0T-PromptReco-v1/ALCARECO
trackcollection = ALCARECOTkAlCosmicsCTF0T

#[mcValidate:some_mc_validation]
#maxevents = 1000
Expand All @@ -75,7 +75,7 @@ legendoptions = meanerror rmserror modules outside
#uncomment these to get titles

#customtitle = #CMS{Preliminary}
customrighttitle = 2016G cosmics and collisions data
customrighttitle = 2017A cosmics and collisions data
#legendheader = header

#to ensure the legend font is readable, but it gets messy if you have >~3 curves
Expand All @@ -95,7 +95,7 @@ subdetector = none
#outliercut = 0.95

#customtitle = #CMS{Preliminary}
customrighttitle = 2016G cosmics during collisions data
customrighttitle = 2017A 3.8T cosmics data
#legendheader = header

[plots:zmumu]
Expand All @@ -108,15 +108,15 @@ customrighttitle = 2017A Z#rightarrow#mu#mu data, |#eta|<2.4
# configure which validation to run on which alignment

[validation]
offline validation_IsoMu : 2016G_prompt
offline validation_IsoMu : 2016G_express
offline validation_cosmics : 2016G_prompt
offline validation_cosmics : 2016G_express
offline validation_MinBias : prompt
offline validation_MinBias : express
offline validation_cosmics : prompt
offline validation_cosmics : express
#278819 is the run number. Note that the plots will show (first geometry) - (second geometry), in this case prompt - express
compare Tracker: 2016G_prompt 278819, 2016G_express 278819
zmumu some_zmumu_validation : 2016G_prompt
zmumu some_zmumu_validation : 2016G_express
split some_split_validation : 2016G_prompt
split some_split_validation : 2016G_express
compare Tracker: prompt 278819, express 278819
zmumu some_zmumu_validation : prompt
zmumu some_zmumu_validation : express
split some_split_validation : prompt
split some_split_validation : express
#mcValidate some_mc_validation : alignment_0

0 comments on commit fb5a0fb

Please sign in to comment.