Merge pull request #19771 from perahrens/CMSSW_9_3_X
Embedding developments: New feature electron embedding and bugfixes
cmsbuild committed Sep 18, 2017
2 parents a3917b1 + 7c9476c commit aa837d3
Showing 7 changed files with 276 additions and 190 deletions.
11 changes: 5 additions & 6 deletions GeneratorInterface/Core/interface/EmbeddingHepMCFilter.h
@@ -17,7 +17,7 @@ class EmbeddingHepMCFilter : public BaseHepMCFilter{
const int muonPDGID_ = 13;
const int electron_neutrino_PDGID_ = 12;
const int electronPDGID_ = 11;
const int ZPDGID_ = 23;

enum class TauDecayMode : int
{
@@ -74,11 +74,10 @@ class EmbeddingHepMCFilter : public BaseHepMCFilter{

std::vector<CutsContainer> cuts_;
DecayChannel DecayChannel_;

virtual void fill_cut(std::string cut_string, EmbeddingHepMCFilter::DecayChannel &dc, CutsContainer &cut);
virtual void fill_cuts(std::string cut_string, EmbeddingHepMCFilter::DecayChannel &dc);

virtual void decay_and_sump4Vis(HepMC::GenParticle* particle, reco::Candidate::LorentzVector &p4Vis);
virtual void sort_by_convention(std::vector<reco::Candidate::LorentzVector> &p4VisPair);
virtual bool apply_cuts(std::vector<reco::Candidate::LorentzVector> &p4VisPair);
82 changes: 44 additions & 38 deletions GeneratorInterface/Core/src/EmbeddingHepMCFilter.cc
@@ -47,40 +47,46 @@ EmbeddingHepMCFilter::filter(const HepMC::GenEvent* evt)
//Reset DecayChannel_ and p4VisPair_ at the beginning of each event.
DecayChannel_.reset();
std::vector<reco::Candidate::LorentzVector> p4VisPair_;

// Going through the particle list. Mother particles are always before their children.
// One can stop the loop after the second tau is reached and processed.
for ( HepMC::GenEvent::particle_const_iterator particle = evt->particles_begin(); particle != evt->particles_end(); ++particle )
{
int mom_id = 0; // No particle available with PDG ID 0
if ((*particle)->production_vertex() != 0){ // search for the mom via the production_vertex
if ((*particle)->production_vertex()->particles_in_const_begin() != (*particle)->production_vertex()->particles_in_const_end()){
mom_id = (*(*particle)->production_vertex()->particles_in_const_begin())->pdg_id(); // mom was found
}
}

if (std::abs((*particle)->pdg_id()) == tauonPDGID_ && mom_id == ZPDGID_)
{
reco::Candidate::LorentzVector p4Vis;
decay_and_sump4Vis((*particle), p4Vis); // recursive access to final states.
p4VisPair_.push_back(p4Vis);
else if (std::abs((*particle)->pdg_id()) == muonPDGID_ && mom_id == ZPDGID_) // Also handle the option when Z-> mumu
{
reco::Candidate::LorentzVector p4Vis = (reco::Candidate::LorentzVector) (*particle)->momentum();
DecayChannel_.fill(TauDecayMode::Muon); // take the muon cuts
p4VisPair_.push_back(p4Vis);
}
}

if (std::abs((*particle)->pdg_id()) == tauonPDGID_ && mom_id == ZPDGID_) {
reco::Candidate::LorentzVector p4Vis;
decay_and_sump4Vis((*particle), p4Vis); // recursive access to final states.
p4VisPair_.push_back(p4Vis);
} else if (std::abs((*particle)->pdg_id()) == muonPDGID_ && mom_id == ZPDGID_) {// Also handle the option when Z-> mumu
reco::Candidate::LorentzVector p4Vis = (reco::Candidate::LorentzVector) (*particle)->momentum();
DecayChannel_.fill(TauDecayMode::Muon); // take the muon cuts
p4VisPair_.push_back(p4Vis);
} else if (std::abs((*particle)->pdg_id()) == electronPDGID_ && mom_id == ZPDGID_) {// Also handle the option when Z-> ee
reco::Candidate::LorentzVector p4Vis = (reco::Candidate::LorentzVector) (*particle)->momentum();
DecayChannel_.fill(TauDecayMode::Electron); // take the electron cuts
p4VisPair_.push_back(p4Vis);
}

}
// Putting DecayChannel_ in default convention:
// For mixed decay channels use the Electron_Muon, Electron_Hadronic, Muon_Hadronic convention.
// For symmetric decay channels (e.g. Muon_Muon) use Leading_Trailing convention with respect to Pt.
sort_by_convention(p4VisPair_);
edm::LogInfo("EmbeddingHepMCFilter") << "Quantities of the visible decay products:"
<< "\tPt's: " << " 1st " << p4VisPair_[0].Pt() << ", 2nd " << p4VisPair_[1].Pt()
<< "\tEta's: " << " 1st " << p4VisPair_[0].Eta() << ", 2nd " << p4VisPair_[1].Eta()
<< " decay channel: " << return_mode(DecayChannel_.first)<< return_mode(DecayChannel_.second);
if (p4VisPair_.size() == 2) {
sort_by_convention(p4VisPair_);
edm::LogInfo("EmbeddingHepMCFilter") << "Quantities of the visible decay products:"
<< "\tPt's: " << " 1st " << p4VisPair_[0].Pt() << ", 2nd " << p4VisPair_[1].Pt()
<< "\tEta's: " << " 1st " << p4VisPair_[0].Eta() << ", 2nd " << p4VisPair_[1].Eta()
<< " decay channel: " << return_mode(DecayChannel_.first)<< return_mode(DecayChannel_.second);
}
else {
edm::LogError("EmbeddingHepMCFilter") << "Size less non equal two";
}

return apply_cuts(p4VisPair_);
}
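For readers unfamiliar with the HepMC2 API used above: the mother-PDG-ID lookup performed for every particle before branching on the tau, muon and (new) electron cases boils down to the following access pattern. This is an illustrative sketch with a hypothetical helper name, not code from the commit.

#include "HepMC/GenParticle.h"
#include "HepMC/GenVertex.h"

// Sketch only (hypothetical helper, not part of this commit): return the PDG ID
// of the first incoming particle of the production vertex, or 0 if none exists.
static int motherPdgId(HepMC::GenParticle* particle) {
  // 0 signals "no mother found"; no real particle carries PDG ID 0.
  if (particle->production_vertex() != 0 &&
      particle->production_vertex()->particles_in_const_begin() !=
          particle->production_vertex()->particles_in_const_end()) {
    return (*particle->production_vertex()->particles_in_const_begin())->pdg_id();
  }
  return 0;
}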
@@ -155,20 +161,20 @@ EmbeddingHepMCFilter::apply_cuts(std::vector<reco::Candidate::LorentzVector> &p4
{
for (std::vector<CutsContainer>::const_iterator cut = cuts_.begin(); cut != cuts_.end(); ++cut)
{
if(DecayChannel_.first == cut->decaychannel.first && DecayChannel_.second == cut->decaychannel.second){ // First the match to the decay channel
edm::LogInfo("EmbeddingHepMCFilter") << "Cut pt1 = " << cut->pt1 << " pt2 = " << cut->pt2
<< " abs(eta1) = " << cut->eta1 << " abs(eta2) = " << cut->eta2
<< " decay channel: " << return_mode(cut->decaychannel.first)
<< return_mode(cut->decaychannel.second);

if((cut->pt1 == -1. || (p4VisPair[0].Pt() > cut->pt1)) &&
(cut->pt2 == -1. || (p4VisPair[1].Pt() > cut->pt2)) &&
(cut->eta1 == -1.|| (std::abs(p4VisPair[0].Eta()) < cut->eta1)) &&
(cut->eta2 == -1.|| (std::abs(p4VisPair[1].Eta()) < cut->eta2))){
edm::LogInfo("EmbeddingHepMCFilter") << "This cut was passed (Stop here and take the event)";
return true;
}
}
}
return false;
}
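The cut block above relies on a sentinel convention: a threshold of -1. disables that particular requirement, so only explicitly configured pt and eta cuts are applied. A minimal standalone sketch of the same pattern; the struct and function names are made up for illustration and are not CMSSW API.

#include <cmath>

// Sketch of the "-1 means no cut" convention (illustrative names only).
struct LeptonPairCuts {
  double pt1 = -1., pt2 = -1., eta1 = -1., eta2 = -1.;  // -1 disables the cut
};

inline bool passes(const LeptonPairCuts& c,
                   double pt1, double eta1, double pt2, double eta2) {
  return (c.pt1 == -1. || pt1 > c.pt1) &&
         (c.pt2 == -1. || pt2 > c.pt2) &&
         (c.eta1 == -1. || std::abs(eta1) < c.eta1) &&
         (c.eta2 == -1. || std::abs(eta2) < c.eta2);
}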
58 changes: 29 additions & 29 deletions TauAnalysis/MCEmbeddingTools/plugins/CollectionMerger.cc
@@ -46,23 +46,23 @@ void CollectionMerger<T1,T2>::fill_output_obj_tracker(std::unique_ptr<MergeColl

std::map<uint32_t, std::vector<BaseHit> > output_map;
// First merge the collections with the help of the output map
for (auto inputCollection : inputCollections){
for (auto const & inputCollection : inputCollections){
for ( typename MergeCollection::const_iterator clustSet = inputCollection->begin(); clustSet!= inputCollection->end(); ++clustSet ) {
DetId detIdObject( clustSet->detId() );
for (typename edmNew::DetSet<BaseHit>::const_iterator clustIt = clustSet->begin(); clustIt != clustSet->end(); ++clustIt ) {
output_map[detIdObject.rawId()].push_back(*clustIt);
}
}
}
}
// Now save it into the standard CMSSW format, with the standard Filler
for (typename std::map<uint32_t, std::vector<BaseHit> >::const_iterator outHits = output_map.begin(); outHits != output_map.end(); ++outHits ) {
DetId detIdObject(outHits->first);
typename MergeCollection::FastFiller spc(*output, detIdObject);
for (auto Hit : outHits->second){
spc.push_back(Hit);
}
}
}
}

}
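The tracker merge above follows a simple two-pass pattern: bucket all hits from every input collection by raw DetId in a std::map, then write the buckets into the output in key order. A generic, CMSSW-free sketch of that pattern; Hit and the container layout are placeholders, not the real types.

#include <cstdint>
#include <map>
#include <utility>
#include <vector>

// Placeholder hit type for illustration.
struct Hit { float charge; };

// Bucket (detId, hit) pairs from several input collections by detector id.
std::map<uint32_t, std::vector<Hit>> mergeByDetId(
    const std::vector<std::vector<std::pair<uint32_t, Hit>>>& inputs) {
  std::map<uint32_t, std::vector<Hit>> merged;
  for (const auto& collection : inputs) {      // const& avoids copying, as in the fix above
    for (const auto& idAndHit : collection) {
      merged[idAndHit.first].push_back(idAndHit.second);
    }
  }
  return merged;  // std::map keeps det ids sorted, mirroring the ordered output fill
}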


@@ -71,21 +71,21 @@ void CollectionMerger<T1,T2>::fill_output_obj_tracker(std::unique_ptr<MergeColl
template <typename T1, typename T2>
void CollectionMerger<T1,T2>::fill_output_obj_calo(std::unique_ptr<MergeCollection > & output, std::vector<edm::Handle<MergeCollection> > &inputCollections)
{
std::map<uint32_t, BaseHit > output_map;
// First merge the two collections again
for (auto const & inputCollection : inputCollections){
for ( typename MergeCollection::const_iterator recHit = inputCollection->begin(); recHit!= inputCollection->end(); ++recHit ) {
DetId detIdObject( recHit->detid().rawId() );
T2 *akt_calo_obj = &output_map[detIdObject.rawId()];
float new_energy = akt_calo_obj->energy() + recHit->energy();
T2 newRecHit(*recHit);
newRecHit.setEnergy(new_energy);
*akt_calo_obj = newRecHit;
}
}
// Now save it into the standard CMSSW format
for (typename std::map<uint32_t, BaseHit >::const_iterator outHits = output_map.begin(); outHits != output_map.end(); ++outHits ) {
output->push_back(outHits->second);
}
output->sort(); //Do a sort for this collection
}
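Unlike the tracker case, calorimeter hits that share a DetId are combined by summing their energies; the code above relies on std::map::operator[] default-constructing a missing entry, so the first hit on a channel simply adds to a zero-energy rechit. A small sketch of that accumulation step with a placeholder rechit type (SimpleRecHit is not the CMSSW class).

#include <cstdint>
#include <map>

// Placeholder rechit for illustration; a default-constructed entry has zero energy.
struct SimpleRecHit {
  uint32_t id = 0;
  float energy = 0.f;
};

// Add a hit into the per-channel accumulator, creating the entry on first access.
void addRecHit(std::map<uint32_t, SimpleRecHit>& merged, const SimpleRecHit& hit) {
  SimpleRecHit& slot = merged[hit.id];   // creates {0, 0.f} if the id is new
  slot.id = hit.id;
  slot.energy += hit.energy;
}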
@@ -98,16 +98,16 @@ void CollectionMerger<T1,T2>::fill_output_obj_muonchamber(std::unique_ptr<Merge
{
std::map<uint32_t, std::vector<BaseHit> > output_map;
// First merge the collections with the help of the output map
for (auto inputCollection : inputCollections){
for (auto const & inputCollection : inputCollections){
for ( typename MergeCollection::const_iterator recHit = inputCollection->begin(); recHit!= inputCollection->end(); ++recHit ) {
DetId detIdObject( recHit->geographicalId() );
output_map[detIdObject].push_back(*recHit);
}
}
}
// Now save it into the standard CMSSW format, with the standard Filler
for (typename std::map<uint32_t, std::vector<BaseHit> >::const_iterator outHits = output_map.begin(); outHits != output_map.end(); ++outHits ) {
output->put((typename T1::id_iterator::value_type) outHits->first , outHits->second.begin(), outHits->second.end()); // The DTLayerId misses the automatic type cast
}
}
}


@@ -126,7 +126,7 @@ template <>
void CollectionMerger<edmNew::DetSetVector<SiPixelCluster>, SiPixelCluster>::fill_output_obj(std::unique_ptr<MergeCollection > & output, std::vector<edm::Handle<MergeCollection> > &inputCollections)
{
fill_output_obj_tracker(output,inputCollections,true);

}


2 changes: 1 addition & 1 deletion TauAnalysis/MCEmbeddingTools/plugins/CollectionMerger.h
@@ -68,7 +68,7 @@ template <typename T1, typename T2>
CollectionMerger<T1,T2>::CollectionMerger(const edm::ParameterSet& iConfig)
{
std::vector<edm::InputTag> inCollections = iConfig.getParameter<std::vector<edm::InputTag> >("mergCollections");
for (auto inCollection : inCollections){
for (auto const & inCollection : inCollections){
inputs_[inCollection.instance()].push_back(consumes<MergeCollection >(inCollection));
}
for (auto toproduce : inputs_){
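Several of the changes in CollectionMerger are the same one-line pattern: range-based for loops that iterated containers of edm::Handle by value now iterate by const reference, so nothing is copied on each pass. A generic sketch of the difference; HandleLike is a stand-in here, not the real edm::Handle.

#include <vector>

// Placeholder standing in for a handle/wrapper type.
struct HandleLike { /* imagine a small wrapper around a product pointer */ };

void iterate(const std::vector<HandleLike>& handles) {
  for (auto handle : handles) { (void)handle; }          // copies each element
  for (auto const& handle : handles) { (void)handle; }   // binds directly, no copy
}

For a lightweight handle the per-iteration copy is cheap but still unnecessary; the const-reference form also generalizes to element types that are expensive, or not allowed, to copy.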
