Merge pull request #14058 from VinInn/SpeedPFC81_2
Speed up by avoiding eta/phi computations
cmsbuild committed Apr 21, 2016
2 parents 9837ca8 + 93bc63e, commit 3ac8c1a
Showing 5 changed files with 119 additions and 71 deletions.
@@ -4,7 +4,9 @@
#include "FWCore/Utilities/interface/isFinite.h"

#include <cmath>
#include <unordered_map>
#include "CommonTools/Utils/interface/DynArray.h"
#include<iterator>
#include <boost/function_output_iterator.hpp>

#include "vdt/vdtMath.h"

@@ -31,47 +33,63 @@ calculateAndSetPositionActual(reco::PFCluster& cluster) const {
double cl_timeweight=0.0;
double max_e = 0.0;
PFLayer::Layer max_e_layer = PFLayer::NONE;
reco::PFRecHitRef refseed;
// find the seed and max layer and also calculate time
//Michalis : Even if we dont use timing in clustering here we should fill
//the time information for the cluster. This should use the timing resolution(1/E)
//so the weight should be fraction*E^2
//calculate a simplistic depth now. The log weighted will be done
//in different stage
for( const reco::PFRecHitFraction& rhf : cluster.recHitFractions() ) {
const reco::PFRecHitRef& refhit = rhf.recHitRef();
if( refhit->detId() == cluster.seed() ) refseed = refhit;
const double rh_fraction = rhf.fraction();
const double rh_rawenergy = refhit->energy();


auto const recHitCollection = &(*cluster.recHitFractions()[0].recHitRef()) - cluster.recHitFractions()[0].recHitRef().key();
auto nhits = cluster.recHitFractions().size();
struct LHit{ reco::PFRecHit const * hit; double energy; double fraction;};
declareDynArray(LHit,nhits,hits);
for(auto i=0U; i<nhits; ++i) {
auto const & hf = cluster.recHitFractions()[i];
auto k = hf.recHitRef().key();
auto p = recHitCollection+k;
hits[i]= {p,(*p).energy(), hf.fraction()};
}

if(_posCalcNCrystals != -1) // sorted to make neighbour search faster
std::sort(hits.begin(),hits.end(),[](LHit const& a, LHit const& b) { return a.hit<b.hit;});

LHit mySeed={nullptr};
for( auto const & rhf : hits ) {
const reco::PFRecHit & refhit = *rhf.hit;
if( refhit.detId() == cluster.seed() ) mySeed = rhf;
const double rh_fraction = rhf.fraction;
const double rh_rawenergy = rhf.energy;
const double rh_energy = rh_rawenergy * rh_fraction;
if( edm::isNotFinite(rh_energy) ) {
throw cms::Exception("PFClusterAlgo")
<<"rechit " << refhit->detId() << " has a NaN energy... "
<<"rechit " << refhit.detId() << " has a NaN energy... "
<< "The input of the particle flow clustering seems to be corrupted.";
}
cl_energy += rh_energy;
// If time resolution is given, calculated weighted average
if (_timeResolutionCalcBarrel && _timeResolutionCalcEndcap) {
double res2 = 10000.;
int cell_layer = (int)refhit->layer();
if ( bool(_timeResolutionCalcBarrel) & bool(_timeResolutionCalcEndcap) ) {
double res2 = 1.e-4;
int cell_layer = (int)refhit.layer();
if (cell_layer == PFLayer::HCAL_BARREL1 ||
cell_layer == PFLayer::HCAL_BARREL2 ||
cell_layer == PFLayer::ECAL_BARREL)
res2 = _timeResolutionCalcBarrel->timeResolution2(rh_rawenergy);
res2 = 1./_timeResolutionCalcBarrel->timeResolution2(rh_rawenergy);
else
res2 = _timeResolutionCalcEndcap->timeResolution2(rh_rawenergy);
cl_time += rh_fraction*refhit->time()/res2;
cl_timeweight += rh_fraction/res2;
res2 = 1./_timeResolutionCalcEndcap->timeResolution2(rh_rawenergy);
cl_time += rh_fraction*refhit.time()*res2;
cl_timeweight += rh_fraction*res2;
}
else { // assume resolution = 1/E**2
const double rh_rawenergy2 = rh_rawenergy*rh_rawenergy;
cl_timeweight+=rh_rawenergy2*rh_fraction;
cl_time += rh_rawenergy2*rh_fraction*refhit->time();
cl_time += rh_rawenergy2*rh_fraction*refhit.time();
}

if( rh_energy > max_e ) {
max_e = rh_energy;
max_e_layer = rhf.recHitRef()->layer();
max_e_layer = refhit.layer();
}
}
cluster.setEnergy(cl_energy);
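
The time accumulation above is an inverse-variance weighted mean: each hit enters with weight fraction/sigma_t^2, and res2 now holds the precomputed reciprocal of timeResolution2, so the loop multiplies instead of dividing. A minimal self-contained check of that average, with assumed example resolutions rather than real CMS numbers:

#include <cassert>
#include <cmath>
#include <iostream>

int main() {
  // Assumed example hits: (time [ns], time resolution sigma_t [ns], fraction).
  const double t[2]    = {1.0, 3.0};
  const double sig[2]  = {0.5, 1.0};
  const double frac[2] = {1.0, 1.0};

  double cl_time = 0., cl_timeweight = 0.;
  for (int i = 0; i < 2; ++i) {
    const double res2 = 1. / (sig[i] * sig[i]);  // reciprocal of sigma_t^2, computed once per hit
    cl_time       += frac[i] * t[i] * res2;      // multiply, no division in the loop
    cl_timeweight += frac[i] * res2;
  }
  const double clusterTime = cl_time / cl_timeweight;

  // Inverse-variance mean: (4*1.0 + 1*3.0) / (4 + 1) = 1.4 ns
  assert(std::abs(clusterTime - 1.4) < 1e-12);
  std::cout << "cluster time = " << clusterTime << " ns\n";
}
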
@@ -85,38 +103,50 @@ calculateAndSetPositionActual(reco::PFCluster& cluster) const {
const reco::PFRecHitRefVector* seedNeighbours = nullptr;
switch( _posCalcNCrystals ) {
case 5:
seedNeighbours = &refseed->neighbours4();
seedNeighbours = &mySeed.hit->neighbours4();
break;
case 9:
seedNeighbours = &refseed->neighbours8();
break;
case -1:
seedNeighbours = &mySeed.hit->neighbours8();
break;
default:
assert(0); //bug
break;
}

for( const reco::PFRecHitFraction& rhf : cluster.recHitFractions() ) {
const reco::PFRecHitRef& refhit = rhf.recHitRef();

if( refhit != refseed && _posCalcNCrystals != -1 ) {
auto pos = std::find(seedNeighbours->begin(),seedNeighbours->end(),
refhit);
if( pos == seedNeighbours->end() ) continue;
}

const double rh_energy = refhit->energy() * ((float)rhf.fraction());
const double norm = ( rhf.fraction() < _minFractionInCalc ?
auto compute = [&](LHit const& rhf) {
const reco::PFRecHit & refhit = *rhf.hit;
const double rh_energy = rhf.energy * rhf.fraction;
const double norm = ( rhf.fraction < _minFractionInCalc ?
0.0 :
std::max(0.0,vdt::fast_log(rh_energy/_logWeightDenom)) );
const math::XYZPoint& rhpos_xyz = refhit->position();
std::max(0.0,vdt::fast_log(rh_energy*_logWeightDenom)) );
const math::XYZPoint& rhpos_xyz = refhit.position();
x += rhpos_xyz.X() * norm;
y += rhpos_xyz.Y() * norm;
z += rhpos_xyz.Z() * norm;
depth += refhit->depth()*norm;

depth += refhit.depth()*norm;
position_norm += norm;
};

if(_posCalcNCrystals == -1)
for( auto const & rhf : hits ) compute(rhf);
else { // only seed and its neighbours
compute(mySeed);
// search seedNeighbours to find energy fraction in cluster (sic)
unInitDynArray(reco::PFRecHit const *,seedNeighbours->size(),nei);
for(auto k :seedNeighbours->refVector().keys()){
nei.push_back(&recHitCollection[k]);
}
std::sort(nei.begin(),nei.end());
struct LHitLess {
auto operator()(LHit const &a, reco::PFRecHit const * b) const {return a.hit<b;}
auto operator()(reco::PFRecHit const * b, LHit const &a) const {return b<a.hit;}
};
std::set_intersection(hits.begin(),hits.end(),nei.begin(),nei.end(),
boost::make_function_output_iterator(compute),
LHitLess()
);
}


if( position_norm < _minAllowedNorm ) {
edm::LogError("WeirdClusterNormalization")
<< "PFCluster too far from seeding cell: set position to (0,0,0).";
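
For the restricted case (posCalcNCrystals of 5 or 9), the old per-hit std::find over the seed's neighbour list is replaced by sorting both ranges by rec-hit address and walking them once: std::set_intersection feeds every cluster hit that is also a seed neighbour straight into the accumulator through boost::make_function_output_iterator. A standalone sketch of that pattern, using a hypothetical Hit type and a plain weighted-energy sum in place of the real log-weight position math:

#include <algorithm>
#include <iostream>
#include <vector>
#include <boost/function_output_iterator.hpp>

struct Hit  { int id; double energy; };
struct LHit { Hit const* hit; double fraction; };

int main() {
  // Backing collection the pointers refer to (plays the role of the rec-hit collection).
  std::vector<Hit> store{{0, 1.}, {1, 2.}, {2, 4.}, {3, 8.}, {4, 16.}};

  // Hits of this cluster, and the seed's neighbours, both as pointers into 'store'.
  std::vector<LHit> hits{{&store[4], 1.0}, {&store[1], 0.5}, {&store[2], 1.0}};
  std::vector<Hit const*> nei{&store[1], &store[3], &store[4]};

  // Sort both ranges by address so they can be intersected in one pass.
  std::sort(hits.begin(), hits.end(),
            [](LHit const& a, LHit const& b) { return a.hit < b.hit; });
  std::sort(nei.begin(), nei.end());

  // Heterogeneous "less" comparing an LHit with a raw pointer, as in the patch.
  struct LHitLess {
    bool operator()(LHit const& a, Hit const* b) const { return a.hit < b; }
    bool operator()(Hit const* b, LHit const& a) const { return b < a.hit; }
  };

  double sum = 0.;
  auto accumulate = [&](LHit const& h) { sum += h.fraction * h.hit->energy; };

  // Visit only the cluster hits that are also neighbours; each match calls 'accumulate'.
  std::set_intersection(hits.begin(), hits.end(), nei.begin(), nei.end(),
                        boost::make_function_output_iterator(accumulate),
                        LHitLess());

  std::cout << sum << '\n';  // 0.5*2 + 1.0*16 = 17
}

Sorting costs O(n log n) once per cluster but removes the repeated linear searches of the old loop.
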
@@ -5,14 +5,16 @@
#include "DataFormats/ParticleFlowReco/interface/PFRecHitFwd.h"
#include "DataFormats/ParticleFlowReco/interface/PFRecHit.h"

#include "FWCore/MessageLogger/interface/MessageLogger.h"

#include "RecoParticleFlow/PFClusterProducer/interface/CaloRecHitResolutionProvider.h"

class Basic2DGenericPFlowPositionCalc : public PFCPositionCalculatorBase {
public:
Basic2DGenericPFlowPositionCalc(const edm::ParameterSet& conf) :
PFCPositionCalculatorBase(conf),
_posCalcNCrystals(conf.getParameter<int>("posCalcNCrystals")),
_logWeightDenom(conf.getParameter<double>("logWeightDenominator")),
_logWeightDenom(1./conf.getParameter<double>("logWeightDenominator")),
_minAllowedNorm(conf.getParameter<double>("minAllowedNormalization"))

{
@@ -28,7 +30,20 @@ class Basic2DGenericPFlowPositionCalc : public PFCPositionCalculatorBase {
conf.getParameterSet("timeResolutionCalcEndcap");
_timeResolutionCalcEndcap.reset(new CaloRecHitResolutionProvider(timeResConf));
}

switch( _posCalcNCrystals ) {
case 5:
case 9:
case -1:
break;
default:
edm::LogError("Basic2DGenericPFlowPositionCalc") << "posCalcNCrystals not valid";
assert(0); // bug
}


}

Basic2DGenericPFlowPositionCalc(const Basic2DGenericPFlowPositionCalc&) = delete;
Basic2DGenericPFlowPositionCalc& operator=(const Basic2DGenericPFlowPositionCalc&) = delete;

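
In the header, _logWeightDenom now caches the reciprocal of the logWeightDenominator parameter, so the per-hit weight in the position loop becomes vdt::fast_log(rh_energy * _logWeightDenom) with no division; mathematically log(E/d) = log(E * 1/d), so only the cost changes. The added switch also rejects unsupported posCalcNCrystals values once at construction instead of on every cluster. A small check of the identity, using std::log in place of vdt::fast_log and an assumed denominator value:

#include <algorithm>
#include <cassert>
#include <cmath>
#include <iostream>

int main() {
  const double logWeightDenominator = 0.08;            // assumed config value
  const double invDenom = 1. / logWeightDenominator;   // cached once, as in the constructor

  const double rh_energy = 1.7;                        // assumed hit energy * fraction

  // Old form: a division inside the per-hit loop.
  const double wOld = std::max(0.0, std::log(rh_energy / logWeightDenominator));
  // New form: a multiplication with the precomputed reciprocal.
  const double wNew = std::max(0.0, std::log(rh_energy * invDenom));

  assert(std::abs(wOld - wNew) < 1e-12);
  std::cout << wOld << " == " << wNew << '\n';
}
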
@@ -79,9 +79,9 @@ calculateAndSetPositionActual(reco::PFCluster& cluster) const {
cl_energy_float += rh_energyf;
// If time resolution is given, calculate weighted average
if (_timeResolutionCalc) {
const double res2 = _timeResolutionCalc->timeResolution2(rh_rawenergy);
cl_time += rh_fraction*refhit->time()/res2;
cl_timeweight += rh_fraction/res2;
const double res2 = 1./_timeResolutionCalc->timeResolution2(rh_rawenergy);
cl_time += rh_fraction*refhit->time()*res2;
cl_timeweight += rh_fraction*res2;
}
else { // assume resolution ~ 1/E**2
const double rh_rawenergy2 = rh_rawenergy*rh_rawenergy;
@@ -115,7 +115,7 @@ calculateAndSetPositionActual(reco::PFCluster& cluster) const {

const CaloCellGeometry* center_cell =
ecal_geom->getGeometry(refmax->detId());
const double ctreta = center_cell->getPosition().eta();
const double ctreta = center_cell->etaPos();
const double actreta = std::abs(ctreta);
// need to change T0 if in ES
if( actreta > preshowerStartEta && actreta < preshowerEndEta ) {
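
Here the centre cell's eta comes from etaPos() rather than being rederived with getPosition().eta(); rederiving pseudorapidity from a Cartesian point costs a square root and a logarithm on every call, which is exactly the kind of repeated work the commit title refers to. A sketch of the conversion being avoided, with a hypothetical Cell type that pays for it once at construction:

#include <cmath>
#include <iostream>

// Pseudorapidity from a Cartesian position: eta = -ln(tan(theta/2)) = 0.5*ln((r+z)/(r-z)).
double etaFromXYZ(double x, double y, double z) {
  const double r = std::sqrt(x * x + y * y + z * z);
  return 0.5 * std::log((r + z) / (r - z));
}

// Hypothetical cell that computes eta once and then just returns it.
struct Cell {
  double x, y, z, eta;
  Cell(double x_, double y_, double z_) : x(x_), y(y_), z(z_), eta(etaFromXYZ(x_, y_, z_)) {}
  double etaPos() const { return eta; }                          // no sqrt/log here
  double etaRecomputed() const { return etaFromXYZ(x, y, z); }   // what .eta() on a point does
};

int main() {
  Cell c(50., 30., 120.);
  std::cout << c.etaRecomputed() << " == " << c.etaPos() << '\n';
}
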
RecoParticleFlow/PFClusterProducer/src/PFMultiDepthClusterizer.cc (59 changes: 31 additions & 28 deletions)
@@ -37,8 +37,8 @@ buildClusters(const reco::PFClusterCollection& input,
const std::vector<bool>& seedable,
reco::PFClusterCollection& output) {

std::vector<double> etaRMS(input.size(),0.0);
std::vector<double> phiRMS(input.size(),0.0);
std::vector<double> etaRMS2(input.size(),0.0);
std::vector<double> phiRMS2(input.size(),0.0);

//We need to sort the clusters for smaller to larger depth
// for (unsigned int i=0;i<input.size();++i)
@@ -47,10 +47,10 @@


//calculate cluster shapes
calculateShowerShapes(input,etaRMS,phiRMS);
calculateShowerShapes(input,etaRMS2,phiRMS2);

//link
std::vector<ClusterLink> links = link(input,etaRMS,phiRMS);
auto && links = link(input,etaRMS2,phiRMS2);
// for (const auto& link: links)
// printf("link %d %d %f %f\n",link.from(),link.to(),link.dR(),link.dZ());

@@ -60,9 +60,7 @@
std::vector<bool> linked(input.size(),false);

//prune
std::vector<ClusterLink> prunedLinks;
if (links.size())
prunedLinks = prune(links,linked);
auto && prunedLinks = prune(links,linked);

//printf("Pruned links\n")
// for (const auto& link: prunedLinks)
@@ -97,54 +95,59 @@


void PFMultiDepthClusterizer::
calculateShowerShapes(const reco::PFClusterCollection& clusters,std::vector<double>& etaRMS,std::vector<double>& phiRMS) {
float etaSum;
float phiSum;
calculateShowerShapes(const reco::PFClusterCollection& clusters,std::vector<double>& etaRMS2,std::vector<double>& phiRMS2) {

//shower shapes. here do not use the fractions

for( unsigned int i=0;i<clusters.size();++i ) {
const reco::PFCluster& cluster = clusters[i];
etaSum=0.0;phiSum=0.0;
double etaSum=0.0; double phiSum=0.0;
auto const & crep = cluster.positionREP();
for (const auto& frac : cluster.recHitFractions()) {
etaSum +=frac.fraction()*frac.recHitRef()->energy()*fabs(frac.recHitRef()->position().Eta() -cluster.position().Eta() );
phiSum +=frac.fraction()*frac.recHitRef()->energy()*fabs(deltaPhi(frac.recHitRef()->position().Phi(),cluster.position().Phi()));
auto const & h = *frac.recHitRef();
auto const & rep = h.positionREP();
etaSum += (frac.fraction()*h.energy())*std::abs(rep.eta()-crep.eta());
phiSum += (frac.fraction()*h.energy())*std::abs(deltaPhi(rep.phi(),crep.phi()));
}
//protection for single line : assign ~ tower
etaRMS[i] = std::max(etaSum/cluster.energy(),0.1);
phiRMS[i] = std::max(phiSum/cluster.energy(),0.1);

etaRMS2[i] = std::max(etaSum/cluster.energy(),0.1);
etaRMS2[i]*= etaRMS2[i];
phiRMS2[i] = std::max(phiSum/cluster.energy(),0.1);
phiRMS2[i]*=phiRMS2[i];
}

}


std::vector<PFMultiDepthClusterizer::ClusterLink> PFMultiDepthClusterizer::
link(const reco::PFClusterCollection& clusters ,const std::vector<double>& etaRMS,const std::vector<double>& phiRMS) {
link(const reco::PFClusterCollection& clusters ,const std::vector<double>& etaRMS2,const std::vector<double>& phiRMS2) {

std::vector<ClusterLink> links;
//loop on all pairs
for (unsigned int i=0;i<clusters.size();++i)
for (unsigned int j=0;j<clusters.size();++j) {
if (i==j )
continue;
if (i==j ) continue;

const reco::PFCluster& cluster1 = clusters[i];
const reco::PFCluster& cluster2 = clusters[j];

float dz = (cluster2.depth() - cluster1.depth());
auto dz = (cluster2.depth() - cluster1.depth());

//Do not link at the same layer and only link inside out!
if (dz<0.0 || fabs(dz)<0.2)
continue;

float deta =(cluster1.position().Eta()-cluster2.position().Eta())*(cluster1.position().Eta()-cluster2.position().Eta())/(etaRMS[i]*etaRMS[i]+etaRMS[j]*etaRMS[j]);
float dphi = deltaPhi(cluster1.position().Phi(),cluster2.position().Phi())*deltaPhi(cluster1.position().Phi(),cluster2.position().Phi())/(phiRMS[i]*phiRMS[i]+phiRMS[j]*phiRMS[j]);
if (dz<0.0f || std::abs(dz)<0.2f) continue;

auto const & crep1 = cluster1.positionREP();
auto const & crep2 = cluster2.positionREP();

auto deta = crep1.eta()-crep2.eta();
deta = deta*deta/(etaRMS2[i]+etaRMS2[j]);
auto dphi = deltaPhi(crep1.phi(),crep2.phi());
dphi = dphi*dphi/(phiRMS2[i]+phiRMS2[j]);

// printf("Testing Link %d -> %d (%f %f %f %f ) \n",i,j,deta,dphi,cluster1.position().Eta()-cluster2.position().Eta(),deltaPhi(cluster1.position().Phi(),cluster2.position().Phi()));

if (deta<nSigmaEta_ && dphi<nSigmaPhi_ )
links.push_back(ClusterLink(i,j,deta+dphi,fabs(dz),cluster1.energy()+cluster2.energy()));
if ( (deta<nSigmaEta_) & (dphi<nSigmaPhi_ ) )
links.push_back(ClusterLink(i,j,deta+dphi,std::abs(dz),cluster1.energy()+cluster2.energy()));
}

return links;
@@ -154,12 +157,12 @@
prune(std::vector<ClusterLink>& links,std::vector<bool>& linkedClusters) {
std::vector<ClusterLink> goodLinks ;
std::vector<bool> mask(links.size(),false);
if (links.empty()) return goodLinks;

for (unsigned int i=0;i<links.size()-1;++i) {
if (mask[i])
continue;
for (unsigned int j=i+1;j<links.size();++j) {

if (mask[j])
continue;

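
In the linking step the same theme appears twice: the pairwise loop now reads cluster coordinates through positionREP(), the position in its rho-eta-phi representation, instead of converting position().Eta()/Phi() on every comparison, and the shower-shape widths are kept already squared (etaRMS2, phiRMS2) so the significance test is plain arithmetic. A cut-down sketch of that inner comparison, with hypothetical stand-in structs and made-up numbers:

#include <cmath>
#include <cstdio>
#include <vector>

// Stand-in for the cached (eta, phi) part of positionREP().
struct REP { double eta, phi; };
struct Cluster { REP rep; double depth, energy; };

// Wrap a phi difference into (-pi, pi].
double deltaPhi(double a, double b) {
  const double kPi = 3.141592653589793;
  double d = a - b;
  while (d >  kPi) d -= 2. * kPi;
  while (d <= -kPi) d += 2. * kPi;
  return d;
}

int main() {
  std::vector<Cluster> clusters{{{1.20, 0.50}, 1.0, 20.},
                                {{1.22, 0.52}, 2.0, 15.}};
  // Widths stored squared once per cluster; the pair loop never re-squares them.
  std::vector<double> etaRMS2{0.01, 0.01}, phiRMS2{0.01, 0.01};
  const double nSigmaEta = 2., nSigmaPhi = 2.;

  for (std::size_t i = 0; i < clusters.size(); ++i)
    for (std::size_t j = 0; j < clusters.size(); ++j) {
      if (i == j) continue;
      const double dz = clusters[j].depth - clusters[i].depth;
      if (dz < 0.0 || std::abs(dz) < 0.2) continue;   // link inside-out only, never same layer

      double deta = clusters[i].rep.eta - clusters[j].rep.eta;
      deta = deta * deta / (etaRMS2[i] + etaRMS2[j]);
      double dphi = deltaPhi(clusters[i].rep.phi, clusters[j].rep.phi);
      dphi = dphi * dphi / (phiRMS2[i] + phiRMS2[j]);

      if ((deta < nSigmaEta) & (dphi < nSigmaPhi))
        std::printf("link %zu -> %zu (deta2=%.3f, dphi2=%.3f)\n", i, j, deta, dphi);
    }
}
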
@@ -6,7 +6,7 @@

#include <unordered_map>

class PFMultiDepthClusterizer : public PFClusterBuilderBase {
class PFMultiDepthClusterizer final : public PFClusterBuilderBase {
typedef PFMultiDepthClusterizer B2DGPF;
public:
PFMultiDepthClusterizer(const edm::ParameterSet& conf);
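
Declaring PFMultiDepthClusterizer final closes the hierarchy at this class. One common payoff is that the compiler can devirtualize calls made through a pointer or reference whose static type is the final class. A generic illustration with hypothetical types, not the actual CMSSW interfaces:

#include <cmath>
#include <iostream>

struct ClusterBuilderBase {
  virtual double weight(double e) const = 0;
  virtual ~ClusterBuilderBase() = default;
};

// 'final': no class can derive from this one, so its overriders are fixed.
struct LogWeightBuilder final : ClusterBuilderBase {
  double weight(double e) const override { return e > 1. ? std::log(e) : 0.; }
};

// Because the static type is the final class, the compiler is free to turn
// this virtual call into a direct (inlinable) call.
double sumWeights(const LogWeightBuilder& b, const double* e, int n) {
  double s = 0.;
  for (int i = 0; i < n; ++i) s += b.weight(e[i]);
  return s;
}

int main() {
  const double e[3] = {0.5, 2.0, 8.0};
  LogWeightBuilder b;
  std::cout << sumWeights(b, e, 3) << '\n';   // log(2) + log(8), about 2.772
}
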
