Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Run3-alca161 Correct some of the scripts used for IsoTrack Calibration #29017

Merged
merged 1 commit into from Feb 25, 2020
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Jump to
Jump to file
Failed to load files.
Diff view
Diff view
71 changes: 71 additions & 0 deletions Calibration/HcalCalibAlgos/macros/CalibSort.C
Expand Up @@ -835,3 +835,74 @@ void findDuplicateEvent(std::string infile, std::string outfile,
sortEvent(records,debug);
duplicateEvent(outfile, records, debug);
}

// Merge correction factors from two text files into one combined file.
//
// Both inputs are read line by line in lock step; each line holds
// (event-number, eta, phi, weight).  When the event numbers agree, the
// weight is taken from fname1 for |ieta| >= 16 (filled into the "Endcap"
// histogram) and from fname2 otherwise ("Barrel" histogram), and written
// to the combined text file fname.  Control histograms and an eta profile
// are optionally saved to a ROOT file.
//
//   fname1  : first input file  (its weight is used for |ieta| >= 16)
//   fname2  : second input file (its weight is used for |ieta| <  16)
//   fname   : output text file with the combined weights
//   outfile : ROOT file name for the histograms; "" skips writing them
//   debug   : >0 prints mismatched-event lines, >1 prints every line
void combine(const std::string& fname1, const std::string& fname2,
             const std::string& fname, const std::string& outfile, int debug) {
  std::ifstream infile1(fname1.c_str());
  std::ifstream infile2(fname2.c_str());
  if ((!infile1.is_open()) || (!infile2.is_open())) {
    std::cout << "Cannot open " << fname1 << " or " << fname2 << std::endl;
  } else {
    std::ofstream file;
    file.open(fname.c_str(), std::ofstream::out);
    // Histograms are registered with ROOT's current directory (usual for
    // interactive macros); they are written out only if outfile is set.
    TH1D* hist0 = new TH1D("W0", "Correction Factor (All)", 100, 0.0, 10.0);
    TH1D* hist1 = new TH1D("W1", "Correction Factor (Barrel)", 100, 0.0, 10.0);
    TH1D* hist2 = new TH1D("W2", "Correction Factor (Endcap)", 100, 0.0, 10.0);
    TProfile* prof = new TProfile("P", "Correction vs i#eta", 60, -30, 30, 0, 100, "");
    unsigned int kount(0), kout(0);   // lines read / lines written
    double wmaxb(0), wmaxe(0);        // largest weight seen in barrel / endcap
    while (true) {
      double evno1, eta1, phi1, wt1;
      double evno2, eta2, phi2, wt2;
      infile1 >> evno1 >> eta1 >> phi1 >> wt1;
      infile2 >> evno2 >> eta2 >> phi2 >> wt2;
      // Stop at EOF or any malformed line in either file.
      if ((!infile1.good()) || (!infile2.good())) break;
      long int iev1 = evno1;
      long int iev2 = evno2;
      if (debug > 1) {
        int ieta1(eta1), ieta2(eta2), iphi1(phi1), iphi2(phi2);
        std::cout << kount << " Event " << iev1 << ":" << iev2 << " eta "
                  << ieta1 << ":" << ieta2 << " phi " << iphi1 << ":" << iphi2
                  << " wt " << wt1 << ":" << wt2 << std::endl;
      }
      if (iev1 == iev2) {
        int ieta = std::abs(eta1);  // integer |ieta| selects barrel vs endcap
        double wt = (ieta >= 16) ? wt1 : wt2;
        file << std::setw(8) << kount << " " << std::setw(12)
             << std::setprecision(8) << wt << std::endl;
        ++kout;
        hist0->Fill(wt);
        if (ieta >= 16) {
          hist2->Fill(wt);
          if (wt > wmaxe) wmaxe = wt;
        } else {
          hist1->Fill(wt);
          if (wt > wmaxb) wmaxb = wt;
        }
        prof->Fill(eta1, wt);
      } else if (debug > 0) {
        // NOTE(review): on a mismatch both files still advance one line, so
        // the streams stay in lock step rather than re-synchronizing on the
        // event number — presumably the inputs hold identical event lists.
        int ieta1(eta1), ieta2(eta2), iphi1(phi1), iphi2(phi2);
        std::cout << kount << " Event " << iev1 << ":" << iev2 << " eta "
                  << ieta1 << ":" << ieta2 << " phi " << iphi1 << ":" << iphi2
                  << " wt " << wt1 << ":" << wt2 << std::endl;
      }
      ++kount;
    }
    infile1.close();
    infile2.close();
    file.close();
    std::cout << "Writes " << kout << " entries to " << fname << " from "
              << kount << " events from " << fname1 << " and " << fname2
              << " maximum correction factor " << wmaxb << " (Barrel) "
              << wmaxe << " (Endcap)" << std::endl;
    if (!outfile.empty()) {
      TFile* theFile = new TFile(outfile.c_str(), "RECREATE");
      theFile->cd();
      hist0->Write();
      hist1->Write();
      hist2->Write();
      prof->Write();
      theFile->Close();
    }
  }
}
7 changes: 5 additions & 2 deletions Calibration/HcalCalibAlgos/macros/isotrackApplyRegressor.py
@@ -1,7 +1,8 @@
######################################################################################
# Evaluates regressor from loaded model
# Usage:
# python3 isotrackApplyRegressor.py -PU root://cmseos.fnal.gov//store/user/sghosh/ISOTRACK/DIPI_2021_PUpart.root -M ./models/model1.h5 -O corrfac_regression.txt
# python3 isotrackApplyRegressor.py -PU root://cmseos.fnal.gov//store/user/sghosh/ISOTRACK/DIPI_2021_PUpart.root -M ./models/model1.h5 -O corrfac1.txt
# python3 isotrackApplyRegressor.py -PU root://cmseos.fnal.gov//store/user/sghosh/ISOTRACK/DIPI_2021_PUpart.root -M ./models/model2.h5 -O corrfac2.txt
######################################################################################
# coding: utf-8

Expand Down Expand Up @@ -30,6 +31,7 @@

parser = argparse.ArgumentParser()
parser.add_argument("-PU", "--filePU",help="input PU file",default="root://cmseos.fnal.gov//store/user/sghosh/ISOTRACK/DIPI_2021_PUpart.root")
#parser.add_argument("-PU", "--filePU",help="input PU file",default="/eos/uscms/store/user/sghosh/ISOTRACK/DIPI_2021_noPU.root")

parser.add_argument("-M", "--modelname",help="model file name",default="./models/model.h5")
parser.add_argument("-O", "--opfilename",help="output text file name",default="corrfac_regression.txt")
Expand All @@ -47,6 +49,7 @@

branchespu = ['t_Run','t_Event','t_nVtx','t_ieta','t_iphi','t_p','t_pt','t_gentrackP','t_eMipDR','t_eHcal','t_eHcal10','t_eHcal30','t_hmaxNearP','t_emaxNearP','t_hAnnular','t_eAnnular','t_rhoh']
dictpu = tree1.arrays(branches=branchespu)
#dictpu = tree1.arrays(branches=branchespu,entrystart=0, entrystop=300)
dfspu = pd.DataFrame.from_dict(dictpu)
dfspu.columns=branchespu
print ("sample size:",dfspu.shape[0])
Expand All @@ -69,7 +72,7 @@
#df[cols_to_stand] = df[cols_to_stand].apply(lambda x: (x - x.mean()) /(x.std()))
#df[cols_to_minmax] = df[cols_to_minmax].apply(lambda x: (x - x.mean()) / (x.max() - x.min()))
# #(x.max() - x.min()))
df[cols_to_minmax] = df[cols_to_minmax].apply(lambda x: (x - x.min()) / (x.max() - x.min()))
df[cols_to_minmax] = df[cols_to_minmax].apply(lambda x: (x - x.min()) / (x.max() - x.min()) if (x.max() - x.min() > 0) else 1.0/200.0)


uncorrected_values = df['t_eHcal_xun'].values
Expand Down
29 changes: 19 additions & 10 deletions Calibration/HcalCalibAlgos/macros/isotrackTrainRegressor.py
Expand Up @@ -2,6 +2,7 @@
# Trains regressor and saves model for evaluation
# Usage:
# python3 isotrackTrainRegressor.py -I isotk_relval_hi.pkl -V 1
# python3 isotrackTrainRegressor.py -I isotk_relval_lo.pkl -V 2
######################################################################################


Expand Down Expand Up @@ -141,10 +142,18 @@ def propweights(y_true):
model.summary()
#fitting
print ("fitting now=========>")
history = model.fit(X_train,Y_train , batch_size=5000, epochs=1000, validation_split=0.2, verbose=1,sample_weight=propweights(Y_train))
history = model.fit(X_train,Y_train , batch_size=5000, epochs=100, validation_split=0.2, verbose=1,sample_weight=propweights(Y_train))


# In[7]:
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'validation'], loc='upper left')
plt.savefig(modv+'_loss_distributions.png')
plt.close()


preds = model.predict(X_test[:,0:12])
Expand All @@ -158,8 +167,8 @@ def propweights(y_true):
plt.yscale('log')
plt.title("Energy distribution")
plt.legend(loc='upper right')
plt.savefig('energy_distributions.png')

plt.savefig(modv+'_energy_distributions.png')
plt.close()

preds = preds.reshape(preds.shape[0])
print (preds.shape)
Expand All @@ -175,8 +184,8 @@ def propweights(y_true):
#plt.yscale('log')
plt.title("error distribution")
plt.legend(loc='upper right')
plt.savefig('errors.png')

plt.savefig(modv+'_errors.png')
plt.close()

#plt.scatter(targets, marinascorr,alpha=0.3,label='marinascorr')
plt.scatter(targets, uncorrected,alpha=0.3,label='uncorr')
Expand All @@ -187,9 +196,9 @@ def propweights(y_true):
lims = [0, 200]
plt.xlim(lims)
plt.ylim(lims)
#_ = plt.plot(lims, lims)
plt.savefig('energyscatt.png')

_ = plt.plot(lims, lims)
plt.savefig(modv+'_energyscatt.png')
plt.close()



Expand All @@ -204,8 +213,8 @@ def propweights(y_true):
plt.legend(loc='upper right')
plt.yscale('log')
plt.title("E/p distributions")
plt.savefig('eopdist.png')

plt.savefig(modv+'_eopdist.png')
plt.close()

# In[8]:

Expand Down