Commit 86037ff5 authored by Peter Fackeldey's avatar Peter Fackeldey
Browse files

Merge branch 'scikit-hep-upgrade' of...

Merge branch 'scikit-hep-upgrade' of git.rwth-aachen.de:3pia/cms_analyses/bbww_sl into scikit-hep-upgrade
parents 289affb7 81f143e9
......@@ -176,20 +176,11 @@ analysis.aux["btag_sf_shifts"] = [
analysis.aux["sync"] = {
"lookup_set": {
("mu", "m"),
("ee", "ee"),
("emu", "em"),
("mumu", "mm"),
("min_dhi_jet", "min_dphi_jet"),
("vbf_tag", "VBF_tag"),
("electron", "ele"),
("muon", "mu"),
("jet", "ak4Jet"),
("fat", "ak8Jet"),
("weight_gen_weight", "MC_weight"),
("weight_pileup", "PU_weight"),
("weight_trigger_electron_sf", "weight_trigger_el_sf"),
("weight_trigger_muon_sf", "weight_trigger_mu_sf"),
(b">=", b"geq"),
(b"==", b"eq"),
(b"<=", b"leq"),
(b">", b"g"),
(b"<", b"l"),
},
"categories": ["is_all_incl_sr_prompt"],
"eventnr": "eventnr",
......@@ -263,12 +254,15 @@ class_HHGluGlu_NLO_reweight = aci.Process(
id=17789287,
label="HH(GGF)",
processes=[
analysis.processes.get("HH_2B2WToLNu2J_GluGlu_reweight"),
analysis.processes.get("HH_2B2VTo2L2Nu_GluGlu_reweight"),
analysis.processes.get("HH_2B2Tau_GluGlu_reweight"),
proc
for parent_process in [
"HH_2B2WToLNu2J_GluGlu_reweight",
"HH_2B2VTo2L2Nu_GluGlu_reweight",
"HH_2B2Tau_GluGlu_reweight",
]
for proc in analysis.processes.get(parent_process).processes.query(".*0_0_0")
],
)
analysis.processes.extend([class_HHGluGlu_NLO_reweight])
analysis.aux["multiclass"] = MulticlassConfig(
......@@ -288,7 +282,7 @@ analysis.aux["multiclass"] = MulticlassConfig(
},
},
group="mergedinclusive",
maxn=2e6,
maxn=4e6,
)
......
......@@ -25,14 +25,16 @@ btagWeight_lf=CMS_btag_LF_2016_2017_2018
btagWeight_lfstats1=CMS_btag_lfstats1_2016
btagWeight_lfstats2=CMS_btag_lfstats2_2016
btagWeight_subjet=CMS_btag_subjet_2016
electron_id_loose=CMS_eff_e_2016
electron_tth_loose=CMS_ttHID_eff_e_loose_2016
#electron_id_loose=CMS_ttHID_reco_e_2016
#electron_tth_tight=CMS_ttHID_eff_e_tight_2016
#electron_tth_relaxed=CMS_ttHID_eff_e_loose_2016
jer=CMS_res_j_2016
jet_PUid_efficiency=CMS_eff_j_PUJET_id_2016
jet_PUid_mistag=CMS_eff_j_PUJET_mistag_2016
l1_ecal_prefiring=CMS_l1_ecal_prefiring_2016
muon_idiso_loose=CMS_eff_m_2016
muon_tth_loose=CMS_ttHID_eff_m_loose_2016
#muon_idiso_loose=CMS_eff_m_2016
#muon_tth_tight=CMS_ttHID_eff_m_tight_2016
#muon_tth_relaxed=CMS_ttHID_eff_m_loose_2016
pileup=CMS_pileup_2016
top_pT_reweighting=CMS_top_pT_reweighting
trigger_muon_sf=CMS_bbww_sl_TriggerWeight_mu_2016
......
......@@ -25,14 +25,16 @@ btagWeight_lf=CMS_btag_LF_2016_2017_2018
btagWeight_lfstats1=CMS_btag_lfstats1_2017
btagWeight_lfstats2=CMS_btag_lfstats2_2017
btagWeight_subjet=CMS_btag_subjet_2017
electron_id_loose=CMS_eff_e_2017
electron_tth_loose=CMS_ttHID_eff_e_loose_2017
#electron_id_loose=CMS_ttHID_reco_e_2017
#electron_tth_tight=CMS_ttHID_eff_e_tight_2017
#electron_tth_relaxed=CMS_ttHID_eff_e_loose_2017
jer=CMS_res_j_2017
jet_PUid_efficiency=CMS_eff_j_PUJET_id_2017
jet_PUid_mistag=CMS_eff_j_PUJET_mistag_2017
l1_ecal_prefiring=CMS_l1_ecal_prefiring_2017
muon_idiso_loose=CMS_eff_m_2017
muon_tth_loose=CMS_ttHID_eff_m_loose_2017
#muon_idiso_loose=CMS_eff_m_2017
#muon_tth_tight=CMS_ttHID_eff_m_tight_2017
#muon_tth_relaxed=CMS_ttHID_eff_m_loose_2017
pileup=CMS_pileup_2017
top_pT_reweighting=CMS_top_pT_reweighting
trigger_muon_sf=CMS_bbww_sl_TriggerWeight_mu_2017
......
......@@ -25,14 +25,16 @@ btagWeight_hfstats2=CMS_btag_hfstats2_2018
btagWeight_lf=CMS_btag_LF_2016_2017_2018
btagWeight_lfstats1=CMS_btag_lfstats1_2018
btagWeight_lfstats2=CMS_btag_lfstats2_2018
electron_id_loose=CMS_eff_e_2018
electron_tth_loose=CMS_ttHID_eff_e_loose_2018
#electron_id_loose=CMS_ttHID_reco_e_2018
#electron_tth_tight=CMS_ttHID_eff_e_tight_2018
#electron_tth_relaxed=CMS_ttHID_eff_e_loose_2018
jer=CMS_res_j_2018
jet_PUid_efficiency=CMS_eff_j_PUJET_id_2018
jet_PUid_mistag=CMS_eff_j_PUJET_mistag_2018
l1_ecal_prefiring=CMS_l1_ecal_prefiring_2018
muon_idiso_loose=CMS_eff_m_2018
muon_tth_loose=CMS_ttHID_eff_m_loose_2018
#muon_idiso_loose=CMS_eff_m_2018
#muon_tth_tight=CMS_ttHID_eff_m_tight_2018
#muon_tth_relaxed=CMS_ttHID_eff_m_loose_2018
pileup=CMS_pileup_2018
top_pT_reweighting=CMS_top_pT_reweighting
trigger_muon_sf=CMS_bbww_sl_TriggerWeight_mu_2018
......
......@@ -529,21 +529,22 @@ class Base(common.NeutrinoBase, common.Base):
# lepton sf
e_corr = self.corrections["electron"]
mu_corr = self.corrections["muon"]
mask = (abs(lep.pdgId) == 11) & lep.gen_matched
# electrons
# fmt: off
loose_1 = e_corr[f"electron_Tallinn_id_loose_01_EGamma_SF2D"](abs(lep.eta), lep.pt)[mask]
loose_1_err = e_corr[f"electron_Tallinn_id_loose_01_EGamma_SF2D_error"](abs(lep.eta), lep.pt)[mask]
loose_2 = e_corr[f"electron_Tallinn_id_loose_02_EGamma_SF2D"](abs(lep.eta), lep.pt)[mask]
loose_2_err = e_corr[f"electron_Tallinn_id_loose_02_EGamma_SF2D_error"](abs(lep.eta), lep.pt)[mask]
## id and loose tth id
mask = (abs(lep.pdgId) == 11) & lep.gen_matched
loose_1 = e_corr[f"electron_Tallinn_id_loose_01_EGamma_SF2D"](abs(lep.eta), lep.pt)[mask] # using pT
loose_1_err = e_corr[f"electron_Tallinn_id_loose_01_EGamma_SF2D_error"](abs(lep.eta), lep.pt)[mask] # using pT
loose_2 = e_corr[f"electron_Tallinn_id_loose_02_EGamma_SF2D"](abs(lep.eta), lep.pt)[mask] # using pT
loose_2_err = e_corr[f"electron_Tallinn_id_loose_02_EGamma_SF2D_error"](abs(lep.eta), lep.pt)[mask] # using pT
if self.year in ("2016", "2017"):
# for 2016 and 2017 the reco eff is split into a low and high pt region
reco_low = e_corr[f"electron_Tallinn_reco_low_EGamma_SF2D"](lep.eta, lep.pt)[mask & (lep.pt < 20)]
reco_low_err = e_corr[f"electron_Tallinn_reco_low_EGamma_SF2D_error"](lep.eta, lep.pt)[mask & (lep.pt < 20)]
reco_high = e_corr[f"electron_Tallinn_reco_high_EGamma_SF2D"](lep.eta, lep.pt)[mask & (lep.pt >= 20)]
reco_high_err = e_corr[f"electron_Tallinn_reco_high_EGamma_SF2D_error"](lep.eta, lep.pt)[mask & (lep.pt >= 20)]
reco_low = e_corr[f"electron_Tallinn_reco_low_EGamma_SF2D"](lep.eta, lep.pt)[mask & (lep.pt < 20)] # using pT
reco_low_err = e_corr[f"electron_Tallinn_reco_low_EGamma_SF2D_error"](lep.eta, lep.pt)[mask & (lep.pt < 20)] # using pT
reco_high = e_corr[f"electron_Tallinn_reco_high_EGamma_SF2D"](lep.eta, lep.pt)[mask & (lep.pt >= 20)] # using pT
reco_high_err = e_corr[f"electron_Tallinn_reco_high_EGamma_SF2D_error"](lep.eta, lep.pt)[mask & (lep.pt >= 20)] # using pT
electron_id_nom, electron_id_down, electron_id_up = (
reduce(
mul,
......@@ -559,8 +560,8 @@ class Base(common.NeutrinoBase, common.Base):
for sign in (0, -1, +1)
)
elif self.year == "2018":
reco = e_corr[f"electron_Tallinn_reco_EGamma_SF2D"](lep.eta, lep.pt)[mask]
reco_err = e_corr[f"electron_Tallinn_reco_EGamma_SF2D_error"](lep.eta, lep.pt)[mask]
reco = e_corr[f"electron_Tallinn_reco_EGamma_SF2D"](lep.eta, lep.pt)[mask] # using pT
reco_err = e_corr[f"electron_Tallinn_reco_EGamma_SF2D_error"](lep.eta, lep.pt)[mask] # using pT
electron_id_nom, electron_id_down, electron_id_up = (
reduce(
mul,
......@@ -583,44 +584,71 @@ class Base(common.NeutrinoBase, common.Base):
shift=False,
)
# fmt: off
## tight tth id
mask = (abs(lep.pdgId) == 11) & lep.gen_matched & lep.tight_cand
prefix = "electron_Tallinn_tth_loose_EGamma_SF2D"
nominal = ak.prod(e_corr["electron_Tallinn_tth_tight_EGamma_SF2D"](abs(lep.eta), lep.pt)[mask], axis=-1) # using pT
up = ak.prod(e_corr["electron_Tallinn_tth_tight_error_histo_eff_data_max"](abs(lep.eta), lep.pt)[mask], axis=-1) # using pT
down = ak.prod(e_corr["electron_Tallinn_tth_tight_error_histo_eff_data_min"](abs(lep.eta), lep.pt)[mask], axis=-1) # using pT
weights.add(
"electron_tth_loose",
ak.prod(e_corr[f"{prefix}"](abs(lep.eta), lep.pt)[mask], axis=-1),
weightUp=ak.prod(e_corr[f"{prefix}_Up"](abs(lep.eta), lep.pt)[mask], axis=-1),
weightDown=ak.prod(
e_corr[f"{prefix}_Down"](abs(lep.eta), lep.pt)[mask], axis=-1
),
"electron_tth_tight",
nominal,
weightUp=nominal * up,
weightDown=nominal * down,
shift=False,
)
## relaxed tth id
prefix = "electron_Tallinn_tth_relaxed_EGamma_SF2D"
weights.add(
"electron_tth_relaxed",
ak.prod(e_corr[f"{prefix}"](abs(lep.eta), lep.pt)[mask], axis=-1), # using pT
weightUp=ak.prod(e_corr[f"{prefix}_Up"](abs(lep.eta), lep.pt)[mask], axis=-1), # using pT
weightDown=ak.prod(e_corr[f"{prefix}_Down"](abs(lep.eta), lep.pt)[mask], axis=-1), # using pT
)
# fmt: on
# muons
# id&iso
## id&iso
mask = (abs(lep.pdgId) == 13) & lep.gen_matched
prefix = "muon_Tallinn_idiso_loose_EGamma_SF2D"
weights.add(
"muon_idiso_loose",
ak.prod(mu_corr[f"{prefix}"](abs(lep.eta), lep.pt)[mask], axis=-1),
ak.prod(mu_corr[f"{prefix}"](abs(lep.eta), lep.pt)[mask], axis=-1), # using pT
weightUp=ak.where(
ak.any(mask, axis=-1),
ak.prod(mu_corr[f"{prefix}_error"](abs(lep.eta), lep.pt)[mask], axis=-1),
ak.prod(
mu_corr[f"{prefix}_error"](abs(lep.eta), lep.pt)[mask], axis=-1
), # using pT
0,
),
weightDown=None,
shift=True,
)
# loose tth
# fmt: off
## tight tth id
mask = (abs(lep.pdgId) == 13) & lep.gen_matched & lep.tight_cand
prefix = "muon_Tallinn_tth_loose_EGamma_SF2D"
nominal = ak.prod(e_corr["muon_Tallinn_tth_tight_EGamma_SF2D"](abs(lep.eta), lep.pt)[mask], axis=-1) # using pT
up = ak.prod(e_corr["muon_Tallinn_tth_tight_error_histo_eff_data_max"](abs(lep.eta), lep.pt)[mask], axis=-1) # using pT
down = ak.prod(e_corr["muon_Tallinn_tth_tight_error_histo_eff_data_min"](abs(lep.eta), lep.pt)[mask], axis=-1) # using pT
weights.add(
"muon_tth_loose",
ak.prod(mu_corr[f"{prefix}"](abs(lep.eta), lep.pt)[mask], axis=-1),
weightUp=ak.prod(mu_corr[f"{prefix}_Up"](abs(lep.eta), lep.pt)[mask], axis=-1),
weightDown=ak.prod(
mu_corr[f"{prefix}_Down"](abs(lep.eta), lep.pt)[mask], axis=-1
),
"muon_tth_tight",
nominal,
weightUp=nominal * up,
weightDown=nominal * down,
shift=False,
)
## relaxed tth id
prefix = f"muon_Tallinn_tth_relaxed_EGamma_SF2D"
weights.add(
f"muon_tth_relaxed",
ak.prod(mu_corr[f"{prefix}"](abs(lep.eta), lep.pt)[mask], axis=-1), # using pT
weightUp=ak.prod(mu_corr[f"{prefix}_Up"](abs(lep.eta), lep.pt)[mask], axis=-1), # using pT
weightDown=ak.prod(mu_corr[f"{prefix}_Down"](abs(lep.eta), lep.pt)[mask], axis=-1), # using pT
)
# fmt: on
# trigger sf (based on cone_pt)
lep_pt = util.normalize(lep.pt, pad=True)
......@@ -952,10 +980,6 @@ class Processor(common.DNNBase, Base, Histogramer):
# debug_dataset = "DYJetsToLL_M-10to50"
# debug_uuids = ["567332A7-A3B6-F64D-B089-32E53B924B97"]
# Sync with Gourab Saha 07.10.21
# debug_dataset = "TTToSemiLeptonic"
# debug_uuids = ["1A511872-6E69-6945-8499-B2696EC437AD"]
@classmethod
def group_processes(cls, hists, task):
def fakes(cat):
......@@ -1143,14 +1167,19 @@ class SyncSelectionExporter(Base, MCOnly, TreeExporter):
tree_id = "syncTree_hhbb1l"
# debug_dataset = "GluGluToHHTo2B2WToLNu2J_node_SM" # "GluGluToHHTo2B2VLNu2J_node_cHHH1"
# debug_uuids = {"4D72A2BC-B237-FE42-813A-1F4E27F3B76B"}
debug_dataset = "data_F_e"
# Sync with Gourab Saha 22.02.22
debug_dataset = "TTToSemiLeptonic"
debug_uuids = [
"10120317-9F80-2943-933F-4B9C762F28A3.root",
"3AC5E417-8AF7-7440-90C2-EFDE6DE36E0A.root",
"BE40ED42-D84D-D741-9681-6E5263EA0486.root",
]
"1A511872-6E69-6945-8499-B2696EC437AD",
"9EBB05A3-E0F1-944C-929E-FED7F5A88926",
] # year 2016
# debug_dataset = "data_F_e"
# debug_uuids = [
# "10120317-9F80-2943-933F-4B9C762F28A3.root",
# "3AC5E417-8AF7-7440-90C2-EFDE6DE36E0A.root",
# "BE40ED42-D84D-D741-9681-6E5263EA0486.root",
# ]
groups = {
"resolved": "resolved_[12]b",
"incl": "boosted|resolved_[12]b",
......@@ -1194,15 +1223,18 @@ class SyncSelectionExporter(Base, MCOnly, TreeExporter):
)
weights = [
"PDFSet",
"PDFSet_off",
"PDFSet_rel",
"PSWeight_ISR",
"PSWeight_FSR",
"ScaleWeight_Fact",
"ScaleWeight_Renorm",
"ScaleWeight_Mixed",
"ScaleWeight_Envelope",
"gen_weight",
"pileup",
"l1_ecal_prefiring",
"top_pT_reweighting",
"electron_id_loose",
"electron_tth_loose",
"muon_idiso_loose",
......@@ -1254,12 +1286,22 @@ class SyncSelectionExporter(Base, MCOnly, TreeExporter):
tensors["electron"] = ("sync_electrons", 1, common + ("cone_pt", "pdgId", "charge", "mvaTTH"), np.float32, {"groups": ["part"]}) # "gen_matched"
tensors["muon"] = ("sync_muons", 1, common + ("cone_pt", "pdgId", "charge", "mvaTTH"), np.float32, {"groups": ["part"]}) # "gen_matched"
tensors["presel_vbf_jets"] = ("presel_vbf_jets", 2, common, np.float32, {"groups": ["multiclass", "input", "part"]})
if not (self.debug and "data" in self.debug_dataset):
if not ("data" in self.debug_dataset):
tensors["weights"] = ("weights", 0, self.weights, np.float32, {})
tensors["eventnr"] = (None, 0, ["eventnr", "dataset_id"], np.int64, {"groups": ["multiclass", "split"]})
# fmt: on
return tensors
def arrays(self, X):
    """Build the export arrays, adding one boolean column per named selection.

    Extends the parent's array dict with a ``selection_<name>`` entry for
    every cut registered on ``X["selection"]`` (presumably a coffea
    PackedSelection — each ``require`` call yields the per-event mask for
    that single cut; TODO confirm against caller).
    """
    result = super().arrays(X)
    cuts = X["selection"]
    for name in cuts._names:
        # Mask of events passing exactly this one cut (others unconstrained).
        result[f"selection_{name}"] = cuts.require(**{name: True})
    return result
def categories(self, select_output):
    """Return a single catch-all category covering every event.

    The sync exporter deliberately ignores ``select_output`` and maps all
    events into one "all" category via a full slice.
    """
    full_range = slice(None)
    return {"all": full_range}
class FindBadGenJetMatch(BaseProcessor):
debug_dataset = "TTTo2L2Nu"
......
Supports Markdown
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment