Merge pull request #15938 from lgray/followup_timing_codereview
Address residual code review for Timing PRs
cmsbuild committed Oct 3, 2016
2 parents 33c3fc9 + 8bc21be commit 5ee2bb8
Showing 14 changed files with 80 additions and 146 deletions.
10 changes: 5 additions & 5 deletions DataFormats/VertexReco/interface/Vertex.h
@@ -38,17 +38,17 @@ namespace reco {
     /// point in the space
     typedef math::XYZPoint Point;
     /// error matrix dimension
-    enum { dimension = 3 };
+    enum { dimension = 3, dimension4D = 4 };
     /// covariance error matrix (3x3)
     typedef math::Error<dimension>::type Error;
     /// covariance error matrix (3x3)
     typedef math::Error<dimension>::type CovarianceMatrix;
     /// covariance error matrix (4x4)
-    typedef math::Error<dimension+1>::type Error4D;
+    typedef math::Error<dimension4D>::type Error4D;
     /// covariance error matrix (4x4)
-    typedef math::Error<dimension+1>::type CovarianceMatrix4D;
+    typedef math::Error<dimension4D>::type CovarianceMatrix4D;
     /// matix size
-    enum { size = dimension * ( dimension + 1 ) / 2, size4D = ( dimension + 1 ) * ( dimension + 2 ) / 2 };
+    enum { size = dimension * ( dimension + 1 ) / 2, size4D = ( dimension4D ) * ( dimension4D + 1 ) / 2 };
     /// index type
     typedef unsigned int index;
     /// default constructor - The vertex will not be valid. Position, error,
@@ -185,7 +185,7 @@ namespace reco {
     /// position
     Point position_;
     /// covariance matrix (4x4) as vector
-    float covariance_[ size4D];
+    float covariance_[size4D];
     /// reference to tracks
     std::vector<TrackBaseRef> tracks_;
     /// The vector of refitted tracks
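
Side note on the arithmetic behind these enums: a symmetric d x d matrix stores only its lower triangle, so size = 6 for the 3x3 spatial covariance and size4D = 10 for the 4x4 space+time covariance. A minimal standalone C++ sketch (illustrative, not part of the commit):

#include <cstddef>

// A symmetric d x d matrix has d*(d+1)/2 independent elements.
constexpr std::size_t packedSize(std::size_t d) { return d * (d + 1) / 2; }

// Row-major lower-triangle index of element (i, j) with i >= j -- the same
// order the fill loops in Vertex.cc walk when packing covariance_.
constexpr std::size_t packedIndex(std::size_t i, std::size_t j) {
  return i * (i + 1) / 2 + j;
}

static_assert(packedSize(3) == 6, "3D covariance packs into 6 floats");
static_assert(packedSize(4) == 10, "4D covariance packs into 10 floats");
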
8 changes: 4 additions & 4 deletions DataFormats/VertexReco/src/Vertex.cc
@@ -9,7 +9,7 @@ Vertex::Vertex( const Point & p , const Error & err, double chi2, double ndof, s
   chi2_( chi2 ), ndof_( ndof ), position_( p ), time_(0.) {
   tracks_.reserve( size );
   index idx = 0;
-  for( index i = 0; i < dimension + 1; ++ i ) {
+  for( index i = 0; i < dimension4D; ++ i ) {
     for( index j = 0; j <= i; ++ j ) {
       if( i == dimension || j == dimension ) {
         covariance_[ idx ++ ] = 0.0;
@@ -25,7 +25,7 @@ Vertex::Vertex( const Point & p , const Error4D & err, double time, double chi2,
   chi2_( chi2 ), ndof_( ndof ), position_( p ), time_(time) {
   tracks_.reserve( size4D );
   index idx = 0;
-  for( index i = 0; i < dimension + 1; ++ i )
+  for( index i = 0; i < dimension4D; ++ i )
     for( index j = 0; j <= i; ++ j )
       covariance_[ idx ++ ] = err( i, j );
   validity_ = true;
@@ -34,7 +34,7 @@ Vertex::Vertex( const Point & p , const Error4D & err, double time, double chi2,
 Vertex::Vertex( const Point & p , const Error & err) :
   chi2_( 0.0 ), ndof_( 0 ), position_( p ), time_(0.) {
   index idx = 0;
-  for( index i = 0; i < dimension + 1; ++ i ) {
+  for( index i = 0; i < dimension4D; ++ i ) {
     for( index j = 0; j <= i; ++ j ) {
       if( i == dimension || j == dimension ) {
         covariance_[ idx ++ ] = 0.0;
@@ -63,7 +63,7 @@ void Vertex::fill( Error & err ) const {
 
 void Vertex::fill( Error4D & err ) const {
   index idx = 0;
-  for( index i = 0; i < dimension + 1; ++ i )
+  for( index i = 0; i < dimension4D; ++ i )
     for( index j = 0; j <= i; ++ j )
       err( i, j ) = covariance_[ idx ++ ];
 }
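
The i == dimension || j == dimension guard in the 3D constructors zero-pads the time row and column when a purely spatial Error is stored in the 4x4 packed array. A self-contained sketch of that pattern, using a hypothetical embed3DInto4D helper (assumption: a plain row-major 3x3 input, mirroring the loops above):

#include <array>

// Embed a 3x3 spatial error matrix into the 10-element packed 4x4
// (space+time) array; every element on the time row/column (index 3)
// is set to zero, as in the constructor loops above.
std::array<float, 10> embed3DInto4D(const float err3x3[3][3]) {
  constexpr unsigned dimension = 3, dimension4D = 4;
  std::array<float, 10> cov{};  // value-initialized to zero
  unsigned idx = 0;
  for (unsigned i = 0; i < dimension4D; ++i)
    for (unsigned j = 0; j <= i; ++j)
      cov[idx++] = (i == dimension || j == dimension) ? 0.f : err3x3[i][j];
  return cov;
}
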
17 changes: 5 additions & 12 deletions RecoVertex/Configuration/python/RecoVertex_cff.py
@@ -33,6 +33,7 @@
 )
 
 #timing
+from RecoVertex.PrimaryVertexProducer.TkClusParameters_cff import DA2DParameters
 unsortedOfflinePrimaryVertices1D = unsortedOfflinePrimaryVertices.clone()
 unsortedOfflinePrimaryVertices1D.TkFilterParameters.minPt = cms.double(0.7)
 offlinePrimaryVertices1D=sortedPrimaryVertices.clone(vertices="unsortedOfflinePrimaryVertices1D", particles="trackRefsForJetsBeforeSorting")
@@ -46,9 +47,9 @@
 offlinePrimaryVertices4D=sortedPrimaryVertices.clone(vertices="unsortedOfflinePrimaryVertices4D", particles="trackRefsForJetsBeforeSorting")
 offlinePrimaryVertices4DWithBS=sortedPrimaryVertices.clone(vertices="unsortedOfflinePrimaryVertices4D:WithBS", particles="trackRefsForJetsBeforeSorting")
 
-from SimTracker.TrackerHitAssociation.tpClusterProducer_cfi import *
-from SimTracker.TrackAssociatorProducers.quickTrackAssociatorByHits_cfi import *
-from SimTracker.TrackAssociation.trackTimeValueMapProducer_cfi import *
+from SimTracker.TrackerHitAssociation.tpClusterProducer_cfi import tpClusterProducer
+from SimTracker.TrackAssociatorProducers.quickTrackAssociatorByHits_cfi import quickTrackAssociatorByHits
+from SimTracker.TrackAssociation.trackTimeValueMapProducer_cfi import trackTimeValueMapProducer
 _phase2_tktiming_vertexreco = cms.Sequence( vertexreco.copy() *
                                             tpClusterProducer *
                                             quickTrackAssociatorByHits *
@@ -62,13 +63,5 @@
 )
 
 from Configuration.Eras.Modifier_phase2_timing_cff import phase2_timing
-phase2_timing.toModify( quickTrackAssociatorByHits,
-    pixelSimLinkSrc = cms.InputTag("simSiPixelDigis","Pixel"),
-    stripSimLinkSrc = cms.InputTag("simSiPixelDigis","Tracker")
-)
-
-phase2_timing.toModify( tpClusterProducer,
-    pixelSimLinkSrc = cms.InputTag("simSiPixelDigis", "Pixel"),
-    phase2OTSimLinkSrc = cms.InputTag("simSiPixelDigis","Tracker")
-)
 phase2_timing.toReplaceWith(vertexreco, _phase2_tktiming_vertexreco)

@@ -65,15 +65,11 @@ struct vertex_t{
                  std::vector<track_t> & tks,
                  std::vector<vertex_t> & y,
                  double threshold ) const;
-
-  double update( double beta,
-                 std::vector<track_t> & tks,
-                 std::vector<vertex_t> & y ) const;
 
   double update(double beta,
                 std::vector<track_t> & tks,
                 std::vector<vertex_t> & y,
-                double &rho0 )const;
+                const double rho0 = 0.0 )const;
 
   void dump(const double beta, const std::vector<vertex_t> & y, const std::vector<track_t> & tks, const int verbosity=0) const;
   bool merge(std::vector<vertex_t> &,int ) const;
@@ -95,6 +91,7 @@ struct vertex_t{
   float vertexSize_;
   int maxIterations_;
   double coolingFactor_;
+  double logCoolingFactor_;
   float betamax_;
   float betastop_;
   double dzCutOff_;
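
Giving rho0 a default of 0.0 lets a single update() serve both the noise-free and the noisy annealing paths, which is what allows the near-duplicate overload to be dropped (see the .cc diff further below). A toy illustration of the pattern -- partition() is a hypothetical stand-in, not the clusterizer API:

#include <cmath>
#include <iostream>

// One defaulted noise parameter replaces two overloads that differed only
// in whether a constant noise term entered the partition function.
double partition(double beta, double energy, double rho0 = 0.0) {
  // rho0 == 0.0 reproduces the old no-noise overload exactly.
  return std::exp(-beta * energy) + rho0;
}

int main() {
  std::cout << partition(1.0, 2.0) << '\n';       // former no-noise call site
  std::cout << partition(1.0, 2.0, 0.5) << '\n';  // call site with noise
  return 0;
}
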
@@ -1,15 +1,6 @@
 import FWCore.ParameterSet.Config as cms
 
-DA2DParameters = cms.PSet(
-    algorithm = cms.string("DA2D"),
-    TkDAClusParameters = cms.PSet(
-        coolingFactor = cms.double(0.6), # moderate annealing speed
-        Tmin = cms.double(4.), # end of annealing
-        vertexSize = cms.double(0.01), # ~ resolution / sqrt(Tmin)
-        d0CutOff = cms.double(3.), # downweight high IP tracks
-        dzCutOff = cms.double(4.) # outlier rejection after freeze-out (T<Tmin)
-    )
-)
+from RecoVertex.PrimaryVertexProducer.TkClusParameters_cff import DA_vectParameters
 
 offlinePrimaryVertices = cms.EDProducer(
     "PrimaryVertexProducer",
@@ -28,16 +19,7 @@
         trackQuality = cms.string("any")
     ),
 
-    TkClusParameters = cms.PSet(
-        algorithm = cms.string("DA_vect"),
-        TkDAClusParameters = cms.PSet(
-            coolingFactor = cms.double(0.6), # moderate annealing speed
-            Tmin = cms.double(4.), # end of annealing
-            vertexSize = cms.double(0.01), # ~ resolution / sqrt(Tmin)
-            d0CutOff = cms.double(3.), # downweight high IP tracks
-            dzCutOff = cms.double(4.) # outlier rejection after freeze-out (T<Tmin)
-        )
-    ),
+    TkClusParameters = DA_vectParameters,
 
     vertexCollections = cms.VPSet(
     [cms.PSet(label=cms.string(""),

23 changes: 23 additions & 0 deletions RecoVertex/PrimaryVertexProducer/python/TkClusParameters_cff.py
@@ -0,0 +1,23 @@
+import FWCore.ParameterSet.Config as cms
+
+DA2DParameters = cms.PSet(
+    algorithm = cms.string("DA2D"),
+    TkDAClusParameters = cms.PSet(
+        coolingFactor = cms.double(0.6), # moderate annealing speed
+        Tmin = cms.double(4.), # end of annealing
+        vertexSize = cms.double(0.01), # ~ resolution / sqrt(Tmin)
+        d0CutOff = cms.double(3.), # downweight high IP tracks
+        dzCutOff = cms.double(4.) # outlier rejection after freeze-out (T<Tmin)
+    )
+)
+
+DA_vectParameters = cms.PSet(
+    algorithm = cms.string("DA_vect"),
+    TkDAClusParameters = cms.PSet(
+        coolingFactor = cms.double(0.6), # moderate annealing speed
+        Tmin = cms.double(4.), # end of annealing
+        vertexSize = cms.double(0.01), # ~ resolution / sqrt(Tmin)
+        d0CutOff = cms.double(3.), # downweight high IP tracks
+        dzCutOff = cms.double(4.) # outlier rejection after freeze-out (T<Tmin)
+    )
+)
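
Operationally, coolingFactor and Tmin define the annealing schedule: each step scales the temperature by coolingFactor until it falls to Tmin. A toy schedule under those assumed semantics (the starting temperature here is arbitrary):

#include <cstdio>

int main() {
  const double coolingFactor = 0.6;  // moderate annealing speed (as above)
  const double Tmin = 4.0;           // end of annealing (as above)
  double T = 256.0;                  // illustrative starting temperature
  int steps = 0;
  while (T > Tmin) { T *= coolingFactor; ++steps; }
  std::printf("T = %.3f after %d cooling steps\n", T, steps);
  return 0;
}
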
88 changes: 10 additions & 78 deletions RecoVertex/PrimaryVertexProducer/src/DAClusterizerInZT.cc
@@ -74,87 +74,19 @@ double DAClusterizerInZT::e_ik(const track_t & t, const vertex_t &k ) const {
 
 
 
-double DAClusterizerInZT::update( double beta,
-                                  vector<track_t> & tks,
-                                  vector<vertex_t> & y ) const {
-  //update weights and vertex positions
-  // mass constrained annealing without noise
-  // returns the squared sum of changes of vertex positions
-
-  unsigned int nt=tks.size();
-
-  //initialize sums
-  double sumpi=0;
-  for(vector<vertex_t>::iterator k=y.begin(); k!=y.end(); ++k){
-    k->sw=0.; k->swz=0.; k->swt = 0.; k->se=0.;
-    k->swE=0.; k->tC=0.;
-  }
-
-
-  // loop over tracks
-  for(unsigned int i=0; i<nt; i++){
-
-    // update pik and Zi
-    double Zi = 0.;
-    for(vector<vertex_t>::iterator k=y.begin(); k!=y.end(); ++k){
-      k->ei = std::exp(-beta*e_ik(tks[i],*k));// cache exponential for one track at a time
-      Zi += k->pk * k->ei;
-    }
-    tks[i].zi=Zi;
-
-    // normalization for pk
-    if (tks[i].zi>0){
-      sumpi += tks[i].pi;
-      // accumulate weighted z and weights for vertex update
-      for(vector<vertex_t>::iterator k=y.begin(); k!=y.end(); ++k){
-        k->se += tks[i].pi* k->ei / Zi;
-        const double w = k->pk * tks[i].pi* k->ei / ( Zi * ( tks[i].dz2 * tks[i].dt2 ) );
-        k->sw += w;
-        k->swz += w * tks[i].z;
-        k->swt += w * tks[i].t;
-        k->swE += w * e_ik(tks[i],*k);
-      }
-    }else{
-      sumpi += tks[i].pi;
-    }
-
-
-  } // end of track loop
-
-
-  // now update z and pk
-  double delta=0;
-  for(vector<vertex_t>::iterator k=y.begin(); k!=y.end(); k++){
-    if ( k->sw > 0){
-      const double znew = k->swz/k->sw;
-      const double tnew = k->swt/k->sw;
-      delta += sqr(k->z-znew) + sqr(k->t-tnew);
-      k->z = znew;
-      k->t = tnew;
-      k->tC = 2.*k->swE/k->sw;
-    }else{
-      LogDebug("sumw") << "invalid sum of weights in fit: " << k->sw << endl;
-      if(verbose_){cout << " a cluster melted away ? pk=" << k->pk << " sumw=" << k->sw << endl;}
-      k->tC=-1;
-    }
-
-    k->pk = k->pk * k->se / sumpi;
-  }
-
-  // return how much the prototypes moved
-  return delta;
-}
 
 double DAClusterizerInZT::update( double beta,
                                   vector<track_t> & tks,
                                   vector<vertex_t> & y,
-                                  double & rho0 ) const {
+                                  const double rho0 ) const {
   // MVF style, no more vertex weights, update tracks weights and vertex positions, with noise
   // returns the squared sum of changes of vertex positions
 
   unsigned int nt=tks.size();
 
   //initialize sums
+  double sumpi = 0.;
   for(vector<vertex_t>::iterator k=y.begin(); k!=y.end(); k++){
     k->sw = 0.; k->swz = 0.; k->swt = 0.; k->se = 0.;
     k->swE = 0.; k->tC=0.;
@@ -172,10 +104,11 @@ double DAClusterizerInZT::update( double beta,
      Zi += k->pk * k->ei;
    }
    tks[i].zi=Zi;
+   sumpi += tks[i].pi;
 
    // normalization
    if (tks[i].zi>0){
-     // accumulate weighted z and weights for vertex update
+     // accumulate weighted z and weights for vertex update
      for(vector<vertex_t>::iterator k=y.begin(); k!=y.end(); k++){
        k->se += tks[i].pi* k->ei / Zi;
        double w = k->pk * tks[i].pi * k->ei /( Zi * ( tks[i].dz2 * tks[i].dt2 ) );
@@ -185,8 +118,6 @@ double DAClusterizerInZT::update( double beta,
        k->swE += w * e_ik(tks[i],*k);
      }
    }
-
-
  } // end of track loop
 
 
@@ -203,9 +134,9 @@ double DAClusterizerInZT::update( double beta,
    }else{
      edm::LogInfo("sumw") << "invalid sum of weights in fit: " << k->sw << endl;
      if(verbose_){cout << " a cluster melted away ? pk=" << k->pk << " sumw=" << k->sw << endl;}
-     k->tC = 0;
+     k->tC = (rho0 == 0. ? -1 : 0);
    }
-
+   if(rho0 == 0.) k->pk = k->pk * k->se / sumpi;
  }
 
  // return how much the prototypes moved
@@ -345,7 +276,7 @@ double DAClusterizerInZT::beta0( double betamax,
  }// vertex loop (normally there should be only one vertex at beta=0)
 
  if (T0>1./betamax){
-   return betamax/pow(coolingFactor_, int(std::log(T0*betamax)/std::log(coolingFactor_))-1 );
+   return betamax/pow(coolingFactor_, int(std::log(T0*betamax)*logCoolingFactor_)-1 );
  }else{
    // ensure at least one annealing step
    return betamax/coolingFactor_;
@@ -463,7 +394,7 @@ DAClusterizerInZT::DAClusterizerInZT(const edm::ParameterSet& conf) :
  useTc_(true),
  vertexSize_(conf.getParameter<double>("vertexSize")),
  maxIterations_(100),
- coolingFactor_(std::sqrt(conf.getParameter<double>("coolingFactor"))),
+ coolingFactor_(std::sqrt(conf.getParameter<double>("coolingFactor"))),
  betamax_(0.1),
  betastop_(1.0),
  dzCutOff_(conf.getParameter<double>("dzCutOff")),
@@ -482,7 +413,8 @@ DAClusterizerInZT::DAClusterizerInZT(const edm::ParameterSet& conf) :
  if(coolingFactor_<0){
    coolingFactor_=-coolingFactor_; useTc_=false;
  }
-
+
+ logCoolingFactor_ = 1.0/std::log(coolingFactor_);
 }
 
 
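
The beta0() change trades a division for a multiplication: log_c(x) = log(x)/log(c) = log(x) * (1/log(c)), so caching logCoolingFactor_ = 1.0/std::log(coolingFactor_) in the constructor leaves one multiply per call. A quick standalone check of the identity (illustrative values):

#include <cassert>
#include <cmath>

int main() {
  const double coolingFactor = 0.77;  // illustrative value
  const double logCoolingFactor = 1.0 / std::log(coolingFactor);
  const double x = 0.01;
  // Both expressions compute log base coolingFactor of x.
  assert(static_cast<int>(std::log(x) / std::log(coolingFactor)) ==
         static_cast<int>(std::log(x) * logCoolingFactor));
  return 0;
}
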
16 changes: 8 additions & 8 deletions RecoVertex/VertexPrimitives/interface/VertexState.h
@@ -26,43 +26,43 @@ class VertexState final : private BasicVertexState::Proxy {
  //  Base ( new BSVS ( std::forward<Args>(args)...)){}
 
  explicit VertexState(BasicVertexState* p) :
-   Base(p) {}
+   Base(p) {}
 
  explicit VertexState(const reco::BeamSpot& beamSpot) :
-   Base ( new BSVS ( GlobalPoint(Basic3DVector<float> (beamSpot.position())),
+   Base ( new BSVS ( GlobalPoint(Basic3DVector<float> (beamSpot.position())),
                      GlobalError(beamSpot.rotatedCovariance3D()), 1.0)) {}
 
 
  VertexState(const GlobalPoint & pos,
              const GlobalError & posErr,
              const double & weightInMix= 1.0) :
-   Base ( new BSVS (pos, posErr, weightInMix)) {}
+   Base ( new BSVS (pos, posErr, weightInMix)) {}
 
  VertexState(const GlobalPoint & pos,
              const GlobalWeight & posWeight,
              const double & weightInMix= 1.0) :
-   Base ( new BSVS (pos, posWeight, weightInMix)) {}
+   Base ( new BSVS (pos, posWeight, weightInMix)) {}
 
  VertexState(const AlgebraicVector3 & weightTimesPosition,
              const GlobalWeight & posWeight,
              const double & weightInMix= 1.0) :
-   Base ( new BSVS (weightTimesPosition, posWeight, weightInMix)) {}
+   Base ( new BSVS (weightTimesPosition, posWeight, weightInMix)) {}
 
  // with time
  VertexState(const GlobalPoint & pos, const double time,
              const GlobalError & posTimeErr,
              const double & weightInMix = 1.0) :
-   Base ( new BSVS (pos, time, posTimeErr, weightInMix)) {}
+   Base ( new BSVS (pos, time, posTimeErr, weightInMix)) {}
 
  VertexState(const GlobalPoint & pos, const double time,
              const GlobalWeight & posTimeWeight,
              const double & weightInMix = 1.0) :
-   Base ( new BSVS (pos, time, posTimeWeight, weightInMix)) {}
+   Base ( new BSVS (pos, time, posTimeWeight, weightInMix)) {}
 
  VertexState(const AlgebraicVector4 & weightTimesPosition,
              const GlobalWeight & posTimeWeight,
              const double & weightInMix = 1.0) :
-   Base ( new BSVS (weightTimesPosition, posTimeWeight, weightInMix)) {}
+   Base ( new BSVS (weightTimesPosition, posTimeWeight, weightInMix)) {}
 
 
  //3D covariance matrices (backwards compatible)
