From 196fa1d419241395216227658f385aea0d4c801b Mon Sep 17 00:00:00 2001 From: Alic Shen Spellman Date: Fri, 26 Apr 2024 16:19:14 -0700 Subject: [PATCH 01/27] need to merge newest --- analysis/data/2016_mc_ana_corrections.json | 7 +++++ .../simps/Tight_2016_simp_SR_analysis.json | 22 +++++++++++++++ .../radMatchTight_2016_simp_SR_analysis.json | 27 +++++++++++++++++++ processors/config/anaSimps_2016_cfg.py | 14 +++++----- processors/src/NewVertexAnaProcessor.cxx | 2 ++ 5 files changed, 65 insertions(+), 7 deletions(-) create mode 100644 analysis/data/2016_mc_ana_corrections.json create mode 100644 analysis/selections/simps/Tight_2016_simp_SR_analysis.json create mode 100644 analysis/selections/simps/radMatchTight_2016_simp_SR_analysis.json diff --git a/analysis/data/2016_mc_ana_corrections.json b/analysis/data/2016_mc_ana_corrections.json new file mode 100644 index 000000000..5660078d5 --- /dev/null +++ b/analysis/data/2016_mc_ana_corrections.json @@ -0,0 +1,7 @@ +{ + "corrections": { + "track_time": -5.5, + "track_cluster_dt": -0.7, + "track_z0": -0.06 + } +} diff --git a/analysis/selections/simps/Tight_2016_simp_SR_analysis.json b/analysis/selections/simps/Tight_2016_simp_SR_analysis.json new file mode 100644 index 000000000..76eee9311 --- /dev/null +++ b/analysis/selections/simps/Tight_2016_simp_SR_analysis.json @@ -0,0 +1,22 @@ +{ + "L1Requirement_eq" : { + "cut" : 1, + "id" : 0, + "info" : "L1L1" + }, + "pSum_lt" : { + "cut" : 1.9, + "id" : 1, + "info" : "P_{e^{-}} + P_{e^{+}} < 1.9 [GeV]" + }, + "pSum_gt" : { + "cut" : 0.4, + "id" : 2, + "info" : "P_{e^{-}} + P_{e^{+}} > 0.4 GeV" + }, + "nVtxs_eq" : { + "cut" : 1, + "id" : 3, + "info" : "N_{vtx}=1" + } +} diff --git a/analysis/selections/simps/radMatchTight_2016_simp_SR_analysis.json b/analysis/selections/simps/radMatchTight_2016_simp_SR_analysis.json new file mode 100644 index 000000000..fca0a9275 --- /dev/null +++ b/analysis/selections/simps/radMatchTight_2016_simp_SR_analysis.json @@ -0,0 +1,27 @@ +{ + "L1Requirement_eq" : { + "cut" : 1, + "id" : 0, + "info" : "L1L1" + }, + "pSum_lt" : { + "cut" : 1.9, + "id" : 1, + "info" : "P_{e^{-}} + P_{e^{+}} < 1.9 [GeV]" + }, + "pSum_gt" : { + "cut" : 0.4, + "id" : 2, + "info" : "P_{e^{-}} + P_{e^{+}} > 0.4 GeV" + }, + "isRadEle_eq" : { + "cut" : 1, + "id" : 3, + "info" : "isRadEle" + }, + "nVtxs_eq" : { + "cut" : 1, + "id" : 4, + "info" : "N_{vtx}=1" + } +} diff --git a/processors/config/anaSimps_2016_cfg.py b/processors/config/anaSimps_2016_cfg.py index a673267d5..01afe51fc 100644 --- a/processors/config/anaSimps_2016_cfg.py +++ b/processors/config/anaSimps_2016_cfg.py @@ -30,7 +30,7 @@ # Processors # ############################### vtxana = HpstrConf.Processor('vtxana', 'NewVertexAnaProcessor') -mcana = HpstrConf.Processor('mcpartana', 'MCAnaProcessor') +#mcana = HpstrConf.Processor('mcpartana', 'MCAnaProcessor') ############################### # Processor Configuration # ############################### @@ -94,12 +94,12 @@ vtxana.parameters["regionDefinitions"] = RegionDefinitions #MCParticleAna -mcana.parameters["debug"] = 0 -mcana.parameters["anaName"] = "mcAna" -mcana.parameters["partColl"] = "MCParticle" -mcana.parameters["trkrHitColl"] = "TrackerHits" -mcana.parameters["ecalHitColl"] = "EcalHits" -mcana.parameters["histCfg"] = os.environ['HPSTR_BASE']+'/analysis/plotconfigs/mc/basicMC.json' +#mcana.parameters["debug"] = 0 +#mcana.parameters["anaName"] = "mcAna" +#mcana.parameters["partColl"] = "MCParticle" +#mcana.parameters["trkrHitColl"] = "TrackerHits" 
+#mcana.parameters["ecalHitColl"] = "EcalHits" +#mcana.parameters["histCfg"] = os.environ['HPSTR_BASE']+'/analysis/plotconfigs/mc/basicMC.json' # Sequence which the processors will run. p.sequence = [vtxana] # ,mcana] diff --git a/processors/src/NewVertexAnaProcessor.cxx b/processors/src/NewVertexAnaProcessor.cxx index 8b379beee..49c3eb686 100644 --- a/processors/src/NewVertexAnaProcessor.cxx +++ b/processors/src/NewVertexAnaProcessor.cxx @@ -333,6 +333,8 @@ bool NewVertexAnaProcessor::process(IEvent* ievent) { if (isData_) { if (!vtxSelector->passCutEq("Pair1_eq",(int)evth_->isPair1Trigger(),weight)) break; + if (!vtxSelector->passCutEq("Single0_eq",(int)evth_->isSingle0Trigger(),weight)) + break; } bool foundParts = _ah->GetParticlesFromVtx(vtx,ele,pos); From ab706eaaa34e64ab08704948a863b1514dc0f2f1 Mon Sep 17 00:00:00 2001 From: Alic Shen Spellman Date: Mon, 29 Apr 2024 08:46:02 -0700 Subject: [PATCH 02/27] junk --- processors/src/NewVertexAnaProcessor.cxx | 15 ++++++++++++++- 1 file changed, 14 insertions(+), 1 deletion(-) diff --git a/processors/src/NewVertexAnaProcessor.cxx b/processors/src/NewVertexAnaProcessor.cxx index f85ab99b2..b8351150a 100644 --- a/processors/src/NewVertexAnaProcessor.cxx +++ b/processors/src/NewVertexAnaProcessor.cxx @@ -47,8 +47,12 @@ void NewVertexAnaProcessor::configure(const ParameterSet& parameters) { //v0 projection fits v0ProjectionFitsCfg_ = parameters.getString("v0ProjectionFitsCfg", v0ProjectionFitsCfg_); - //beamspot positions + //beamspot positions beamPosCfg_ = parameters.getString("beamPosCfg", beamPosCfg_); + + //misc corrections (such as track time bias, z0 bias, etc) + //anaCorrectionsCfg_ = parameters.getSTring("anaCorrectionsCfg", anaCorrectionsCfg_); + //track time bias corrections eleTrackTimeBias_ = parameters.getDouble("eleTrackTimeBias",eleTrackTimeBias_); posTrackTimeBias_ = parameters.getDouble("posTrackTimeBias",posTrackTimeBias_); @@ -82,6 +86,14 @@ void NewVertexAnaProcessor::initialize(TTree* tree) { _mc_vtx_histos->Define2DHistos(); } + //Ana corrections to misc parameters + /* + if(!anaCorrectionsCfg_.empty()){ + std::ifstream anac_file(anaCorrectionsCfg_); + anac_file >> anac_configs_; + anac_file.close(); + }*/ + //Load Run Dependent V0 target projection fits from json if(!v0ProjectionFitsCfg_.empty()){ std::ifstream v0proj_file(v0ProjectionFitsCfg_); @@ -96,6 +108,7 @@ void NewVertexAnaProcessor::initialize(TTree* tree) { bpc_file >> bpc_configs_; bpc_file.close(); } + // histos = new MCAnaHistos(anaName_); //histos->loadHistoConfig(histCfgFilename_) //histos->DefineHistos(); From 900b5b8e200f43f2034fcf3e08f9f1f6158174ef Mon Sep 17 00:00:00 2001 From: Alic Shen Spellman Date: Tue, 7 May 2024 15:03:48 -0700 Subject: [PATCH 03/27] updating a few things that the new verte ana processor uses. Added momentum smearing to vertex object...i think correctly... 
--- event/src/Track.cxx | 4 +- processors/config/anaSimps_2016_cfg.py | 35 +- processors/include/NewVertexAnaProcessor.h | 4 + processors/src/NewVertexAnaProcessor.cxx | 2958 ++++++++++---------- utils/include/TrackSmearingTool.h | 4 +- utils/src/TrackSmearingTool.cxx | 20 +- 6 files changed, 1545 insertions(+), 1480 deletions(-) diff --git a/event/src/Track.cxx b/event/src/Track.cxx index ea673f1a2..bce6b2c05 100644 --- a/event/src/Track.cxx +++ b/event/src/Track.cxx @@ -89,8 +89,8 @@ void Track::addHit(TObject* hit) { } void Track::applyCorrection(std::string var, double correction){ - if(var == "z0"){ - z0_ = z0_ - correction; + if(var == "track_z0"){ + z0_ = z0_ + correction; } if(var == "track_time"){ track_time_ = track_time_ + correction; diff --git a/processors/config/anaSimps_2016_cfg.py b/processors/config/anaSimps_2016_cfg.py index 64ed71f18..6d3f2ce1c 100644 --- a/processors/config/anaSimps_2016_cfg.py +++ b/processors/config/anaSimps_2016_cfg.py @@ -7,6 +7,9 @@ help="Make True to make vertex ana flat tuple", metavar="makeFlatTuple", default=1) base.parser.add_argument("-r", "--isRadPDG", type=int, dest="isRadPDG", help="Set radiative trident PDG ID", metavar="isRadPDG", default=622) +base.parser.add_argument("--pSmearingSeed", type=int, dest="pSmearingSeed", + help="Set job dependent momentum smearing seed", metavar="pSmearingSeed", default=42) + options = base.parser.parse_args() # Use the input file to set the output file name @@ -43,6 +46,8 @@ vtxana.parameters["hitColl"] = "SiClustersOnTrackOnPartOnUVtx" vtxana.parameters["analysis"] = "vertex" vtxana.parameters["vtxSelectionjson"] = os.environ['HPSTR_BASE']+"/analysis/selections/simps/vertexSelection_2016_simp_preselection.json" +#vtxana.parameters["vtxSelectionjson"] = os.environ['HPSTR_BASE']+"/analysis/selections/simps/vertexSelection_2016_simp_nocuts.json" +#vtxana.parameters["histoCfg"] = os.environ['HPSTR_BASE']+"/analysis/plotconfigs/tracking/simps/vtxAnalysis_2016_simp_reach_light.json" vtxana.parameters["histoCfg"] = os.environ['HPSTR_BASE']+"/analysis/plotconfigs/tracking/simps/vtxAnalysis_2016_simp_reach.json" vtxana.parameters["mcHistoCfg"] = os.environ['HPSTR_BASE']+'/analysis/plotconfigs/mc/basicMC.json' ##### @@ -51,22 +56,16 @@ vtxana.parameters["isRadPDG"] = options.isRadPDG vtxana.parameters["makeFlatTuple"] = options.makeFlatTuple vtxana.parameters["beamPosCfg"] = "" -vtxana.parameters["pSmearingFile"] = os.environ['HPSTR_BASE']+"/utils/data/smearingFile_2016_all_12112023.root" -if options.isData: +if options.isData and options.year == 2016: vtxana.parameters["v0ProjectionFitsCfg"] = os.environ['HPSTR_BASE']+'/analysis/data/v0_projection_2016_config.json' -else: - vtxana.parameters["v0ProjectionFitsCfg"] = os.environ['HPSTR_BASE']+'/analysis/data/v0_projection_2016_mc_7800_config.json' #For tritrig and wab mc - #vtxana.parameters["v0ProjectionFitsCfg"] = os.environ['HPSTR_BASE']+'/analysis/data/v0_projection_2016_mc_signal_config.json' #For signal (accidentally gen with bspt=(0,0) - -if options.isData: - vtxana.parameters["eleTrackTimeBias"] = -1.5 - vtxana.parameters["posTrackTimeBias"] = -1.5 -else: - vtxana.parameters["eleTrackTimeBias"] = -2.2 #MC - vtxana.parameters["posTrackTimeBias"] = -2.2 #MC - #vtxana.parameters["eleTrackTimeBias"] = -5.5 #MC For TTs new smearing samples...due to readout bug - #vtxana.parameters["posTrackTimeBias"] = -5.5 #MC For TTs new smearing samples...due to readout bug - + vtxana.parameters["trackBiasCfg"] = 
os.environ['HPSTR_BASE']+'/utils/data/track_bias_corrections_data_2016.json' +elif not options.isData and options.year == 2016: + print('Running MC') + vtxana.parameters["trackBiasCfg"] = os.environ['HPSTR_BASE']+'/utils/data/track_bias_corrections_tritrig_2016.json' + #vtxana.parameters["pSmearingFile"] = os.environ['HPSTR_BASE']+'/utils/data/smearingFile_2016_all_12112023.root' + #vtxana.parameters["pSmearingSeed"] = options.pSmearingSeed + vtxana.parameters["v0ProjectionFitsCfg"] = os.environ['HPSTR_BASE']+'/utils/data/vertex_proj_beamspot_tritrig_2016.json' #For tritrig and wab mc + #vtxana.parameters["v0ProjectionFitsCfg"] = os.environ['HPSTR_BASE']+'/analysis/data/v0_projection_2016_mc_signal_config.json' #For signal (accidentally gen with bspt=(0,0) THIS NEEDS TO CHANGE AS OF 04/29/24. New samples have different beamspots CalTimeOffset = -999 @@ -75,7 +74,7 @@ print("Running on data file: Setting CalTimeOffset %d" % CalTimeOffset) elif (options.isData == 0): - CalTimeOffset = 43. + CalTimeOffset = 42.4 print("Running on MC file: Setting CalTimeOffset %d" % CalTimeOffset) else: print("Specify which type of ntuple you are running on: -t 1 [for Data] / -t 0 [for MC]") @@ -85,12 +84,12 @@ RegionPath = os.environ['HPSTR_BASE']+"/analysis/selections/simps/" if (options.year == 2016): RegionDefinitions = [RegionPath+'Tight_2016_simp_reach_CR.json', - RegionPath+'Tight_2016_simp_reach_SR.json', + RegionPath+'Tight_2016_simp_SR_analysis.json', RegionPath+'Tight_nocuts.json', RegionPath+'Tight_L1L1_nvtx1.json'] if(options.isData != 1): RegionDefinitions.extend([RegionPath+'radMatchTight_2016_simp_reach_CR.json', - RegionPath+'radMatchTight_2016_simp_reach_SR.json']) + RegionPath+'radMatchTight_2016_simp_SR_analysis.json']) vtxana.parameters["regionDefinitions"] = RegionDefinitions diff --git a/processors/include/NewVertexAnaProcessor.h b/processors/include/NewVertexAnaProcessor.h index 0c2a5df45..bc08b679e 100644 --- a/processors/include/NewVertexAnaProcessor.h +++ b/processors/include/NewVertexAnaProcessor.h @@ -117,6 +117,7 @@ class NewVertexAnaProcessor : public Processor { TTree* tree_{nullptr}; //!< description std::string pSmearingFile_{""}; + int pSmearingSeed_{42}; std::shared_ptr smearingTool_; std::string pBiasingFile_{""}; std::shared_ptr biasingTool_; @@ -150,6 +151,9 @@ class NewVertexAnaProcessor : public Processor { std::vector beamPosCorrections_ = {0.0,0.0,0.0}; //!< holds beam position corrections std::string v0ProjectionFitsCfg_{""};//!< json file w run dependent v0 projection fits json v0proj_fits_;//!< json object v0proj + std::string trackBiasCfg_{""}; //!< json containing track bias corrections + json tbc_configs_; // trackBiasCorrections_; double eleTrackTimeBias_ = 0.0; double posTrackTimeBias_ = 0.0; diff --git a/processors/src/NewVertexAnaProcessor.cxx b/processors/src/NewVertexAnaProcessor.cxx index b8351150a..d33861638 100644 --- a/processors/src/NewVertexAnaProcessor.cxx +++ b/processors/src/NewVertexAnaProcessor.cxx @@ -18,1504 +18,1546 @@ NewVertexAnaProcessor::NewVertexAnaProcessor(const std::string& name, Process& p NewVertexAnaProcessor::~NewVertexAnaProcessor(){} void NewVertexAnaProcessor::configure(const ParameterSet& parameters) { - std::cout << "Configuring NewVertexAnaProcessor" <(); - - vtxSelector = std::make_shared(anaName_+"_"+"vtxSelection",selectionCfg_); - vtxSelector->setDebug(debug_); - vtxSelector->LoadSelection(); - - _vtx_histos = std::make_shared(anaName_+"_"+"vtxSelection"); - _vtx_histos->loadHistoConfig(histoCfg_); - 
_vtx_histos->DefineHistos(); - - if(!isData_){ - _mc_vtx_histos = std::make_shared(anaName_+"_mc_"+"vtxSelection"); - _mc_vtx_histos->loadHistoConfig(mcHistoCfg_); - _mc_vtx_histos->DefineHistos(); - _mc_vtx_histos->Define2DHistos(); - } - - //Ana corrections to misc parameters - /* - if(!anaCorrectionsCfg_.empty()){ - std::ifstream anac_file(anaCorrectionsCfg_); - anac_file >> anac_configs_; - anac_file.close(); - }*/ - - //Load Run Dependent V0 target projection fits from json - if(!v0ProjectionFitsCfg_.empty()){ - std::ifstream v0proj_file(v0ProjectionFitsCfg_); - v0proj_file >> v0proj_fits_; - v0proj_file.close(); - } - - //Run Dependent Corrections - //Beam Position - if(!beamPosCfg_.empty()){ - std::ifstream bpc_file(beamPosCfg_); - bpc_file >> bpc_configs_; - bpc_file.close(); - } - - // histos = new MCAnaHistos(anaName_); - //histos->loadHistoConfig(histCfgFilename_) - //histos->DefineHistos(); - //histos->Define2DHistos(); - - - //For each region initialize plots - - for (unsigned int i_reg = 0; i_reg < regionSelections_.size(); i_reg++) { - std::string regname = AnaHelpers::getFileName(regionSelections_[i_reg],false); - std::cout<<"Setting up region:: " << regname <(anaName_+"_"+regname, regionSelections_[i_reg]); - _reg_vtx_selectors[regname]->setDebug(debug_); - _reg_vtx_selectors[regname]->LoadSelection(); - - _reg_vtx_histos[regname] = std::make_shared(anaName_+"_"+regname); - _reg_vtx_histos[regname]->loadHistoConfig(histoCfg_); - _reg_vtx_histos[regname]->DefineHistos(); - - - if(!isData_){ - _reg_mc_vtx_histos[regname] = std::make_shared(anaName_+"_mc_"+regname); - _reg_mc_vtx_histos[regname]->loadHistoConfig(mcHistoCfg_); - _reg_mc_vtx_histos[regname]->DefineHistos(); - } - - //Build a flat tuple for vertex and track params - if (makeFlatTuple_){ - _reg_tuples[regname] = std::make_shared(anaName_+"_"+regname+"_tree"); - - //vtx vars - _reg_tuples[regname]->addVariable("unc_vtx_mass"); - _reg_tuples[regname]->addVariable("unc_vtx_z"); - _reg_tuples[regname]->addVariable("unc_vtx_chi2"); - _reg_tuples[regname]->addVariable("unc_vtx_psum"); - _reg_tuples[regname]->addVariable("unc_vtx_px"); - _reg_tuples[regname]->addVariable("unc_vtx_py"); - _reg_tuples[regname]->addVariable("unc_vtx_pz"); - _reg_tuples[regname]->addVariable("unc_vtx_x"); - _reg_tuples[regname]->addVariable("unc_vtx_y"); - _reg_tuples[regname]->addVariable("unc_vtx_ele_pos_clus_dt"); - _reg_tuples[regname]->addVariable("run_number"); - _reg_tuples[regname]->addVariable("unc_vtx_cxx"); - _reg_tuples[regname]->addVariable("unc_vtx_cyy"); - _reg_tuples[regname]->addVariable("unc_vtx_czz"); - _reg_tuples[regname]->addVariable("unc_vtx_cyx"); - _reg_tuples[regname]->addVariable("unc_vtx_czy"); - _reg_tuples[regname]->addVariable("unc_vtx_czx"); - _reg_tuples[regname]->addVariable("unc_vtx_proj_x"); - _reg_tuples[regname]->addVariable("unc_vtx_proj_y"); - _reg_tuples[regname]->addVariable("unc_vtx_proj_x_sig"); - _reg_tuples[regname]->addVariable("unc_vtx_proj_y_sig"); - _reg_tuples[regname]->addVariable("unc_vtx_proj_sig"); - _reg_tuples[regname]->addVariable("unc_vtx_deltaZ"); - - - //track vars - _reg_tuples[regname]->addVariable("unc_vtx_ele_track_p"); - _reg_tuples[regname]->addVariable("unc_vtx_ele_track_t"); - _reg_tuples[regname]->addVariable("unc_vtx_ele_track_d0"); - _reg_tuples[regname]->addVariable("unc_vtx_ele_track_phi0"); - _reg_tuples[regname]->addVariable("unc_vtx_ele_track_omega"); - _reg_tuples[regname]->addVariable("unc_vtx_ele_track_tanLambda"); - 
_reg_tuples[regname]->addVariable("unc_vtx_ele_track_z0"); - _reg_tuples[regname]->addVariable("unc_vtx_ele_track_chi2ndf"); - _reg_tuples[regname]->addVariable("unc_vtx_ele_track_clust_dt"); - _reg_tuples[regname]->addVariable("unc_vtx_ele_track_z0Err"); - _reg_tuples[regname]->addVariable("unc_vtx_ele_track_d0Err"); - _reg_tuples[regname]->addVariable("unc_vtx_ele_track_tanLambdaErr"); - _reg_tuples[regname]->addVariable("unc_vtx_ele_track_PhiErr"); - _reg_tuples[regname]->addVariable("unc_vtx_ele_track_OmegaErr"); - _reg_tuples[regname]->addVariable("unc_vtx_ele_track_L1_isolation"); - _reg_tuples[regname]->addVariable("unc_vtx_ele_track_nhits"); - _reg_tuples[regname]->addVariable("unc_vtx_ele_track_lastlayer"); - _reg_tuples[regname]->addVariable("unc_vtx_ele_track_si0"); - _reg_tuples[regname]->addVariable("unc_vtx_ele_track_si1"); - _reg_tuples[regname]->addVariable("unc_vtx_ele_track_ecal_x"); - _reg_tuples[regname]->addVariable("unc_vtx_ele_track_ecal_y"); - _reg_tuples[regname]->addVariable("unc_vtx_ele_track_z"); - _reg_tuples[regname]->addVariable("unc_vtx_ele_track_px"); - _reg_tuples[regname]->addVariable("unc_vtx_ele_track_py"); - _reg_tuples[regname]->addVariable("unc_vtx_ele_track_pz"); - - _reg_tuples[regname]->addVariable("unc_vtx_pos_track_clust_dt"); - _reg_tuples[regname]->addVariable("unc_vtx_pos_track_p"); - _reg_tuples[regname]->addVariable("unc_vtx_pos_track_t"); - _reg_tuples[regname]->addVariable("unc_vtx_pos_track_d0"); - _reg_tuples[regname]->addVariable("unc_vtx_pos_track_phi0"); - _reg_tuples[regname]->addVariable("unc_vtx_pos_track_omega"); - _reg_tuples[regname]->addVariable("unc_vtx_pos_track_tanLambda"); - _reg_tuples[regname]->addVariable("unc_vtx_pos_track_z0"); - _reg_tuples[regname]->addVariable("unc_vtx_pos_track_chi2ndf"); - _reg_tuples[regname]->addVariable("unc_vtx_pos_track_z0Err"); - _reg_tuples[regname]->addVariable("unc_vtx_pos_track_d0Err"); - _reg_tuples[regname]->addVariable("unc_vtx_pos_track_tanLambdaErr"); - _reg_tuples[regname]->addVariable("unc_vtx_pos_track_PhiErr"); - _reg_tuples[regname]->addVariable("unc_vtx_pos_track_OmegaErr"); - _reg_tuples[regname]->addVariable("unc_vtx_pos_track_L1_isolation"); - _reg_tuples[regname]->addVariable("unc_vtx_pos_track_nhits"); - _reg_tuples[regname]->addVariable("unc_vtx_pos_track_lastlayer"); - _reg_tuples[regname]->addVariable("unc_vtx_pos_track_si0"); - _reg_tuples[regname]->addVariable("unc_vtx_pos_track_si1"); - _reg_tuples[regname]->addVariable("unc_vtx_pos_track_ecal_x"); - _reg_tuples[regname]->addVariable("unc_vtx_pos_track_ecal_y"); - _reg_tuples[regname]->addVariable("unc_vtx_pos_track_z"); - _reg_tuples[regname]->addVariable("unc_vtx_pos_track_px"); - _reg_tuples[regname]->addVariable("unc_vtx_pos_track_py"); - _reg_tuples[regname]->addVariable("unc_vtx_pos_track_pz"); - - //clust vars - _reg_tuples[regname]->addVariable("unc_vtx_ele_clust_E"); - _reg_tuples[regname]->addVariable("unc_vtx_ele_clust_x"); - _reg_tuples[regname]->addVariable("unc_vtx_ele_clust_corr_t"); - - _reg_tuples[regname]->addVariable("unc_vtx_pos_clust_E"); - _reg_tuples[regname]->addVariable("unc_vtx_pos_clust_x"); - _reg_tuples[regname]->addVariable("unc_vtx_pos_clust_corr_t"); - - if(!isData_) - { - _reg_tuples[regname]->addVariable("true_vtx_z"); - _reg_tuples[regname]->addVariable("true_vtx_mass"); - _reg_tuples[regname]->addVariable("ap_true_vtx_z"); - _reg_tuples[regname]->addVariable("ap_true_vtx_mass"); - _reg_tuples[regname]->addVariable("ap_true_vtx_energy"); - 
_reg_tuples[regname]->addVariable("vd_true_vtx_z"); - _reg_tuples[regname]->addVariable("vd_true_vtx_mass"); - _reg_tuples[regname]->addVariable("vd_true_vtx_energy"); - _reg_tuples[regname]->addVariable("hitCode"); - _reg_tuples[regname]->addVariable("L1hitCode"); - _reg_tuples[regname]->addVariable("L2hitCode"); - } - } - - _regions.push_back(regname); - } - - // Get list of branches in tree to help protect accessing them - int nBr = tree_->GetListOfBranches()->GetEntries(); - if (debug_) std::cout << "Tree has " << nBr << " branches" << std::endl; - for(int iBr = 0; iBr < nBr; iBr++) - { - TBranch *br = dynamic_cast(tree_->GetListOfBranches()->At(iBr)); - brMap_.insert(std::map::value_type(br->GetName(), 1)); - if (debug_) std::cout << br->GetName() << ": " << brMap_[br->GetName()] << std::endl; - } - - //init Reading Tree - tree_->SetBranchAddress("EventHeader", &evth_ , &bevth_); - if (brMap_.find(tsColl_.c_str()) != brMap_.end()) tree_->SetBranchAddress(tsColl_.c_str(), &ts_ , &bts_); - tree_->SetBranchAddress(vtxColl_.c_str(), &vtxs_ , &bvtxs_); - //tree_->SetBranchAddress(hitColl_.c_str(), &hits_ , &bhits_); - if (brMap_.find(hitColl_.c_str()) != brMap_.end()) tree_->SetBranchAddress(hitColl_.c_str(), &hits_ , &bhits_); - if(!isData_ && !mcColl_.empty()) tree_->SetBranchAddress(mcColl_.c_str() , &mcParts_, &bmcParts_); - - if (not pSmearingFile_.empty()) { - // just using the same seed=42 for now - smearingTool_ = std::make_shared(pSmearingFile_,true); - } - - if (not pBiasingFile_.empty()) { - biasingTool_ = std::make_shared(pBiasingFile_); - } - -} + tree_ = tree; + _ah = std::make_shared(); + + vtxSelector = std::make_shared(anaName_+"_"+"vtxSelection",selectionCfg_); + vtxSelector->setDebug(debug_); + vtxSelector->LoadSelection(); + + _vtx_histos = std::make_shared(anaName_+"_"+"vtxSelection"); + _vtx_histos->loadHistoConfig(histoCfg_); + _vtx_histos->DefineHistos(); + + if(!isData_){ + _mc_vtx_histos = std::make_shared(anaName_+"_mc_"+"vtxSelection"); + _mc_vtx_histos->loadHistoConfig(mcHistoCfg_); + _mc_vtx_histos->DefineHistos(); + _mc_vtx_histos->Define2DHistos(); + } -bool NewVertexAnaProcessor::process(IEvent* ievent) { - if(debug_) { - std:: cout << "----------------- Event " << evth_->getEventNumber() << " -----------------" << std::endl; - } - HpsEvent* hps_evt = (HpsEvent*) ievent; - double weight = 1.; - int run_number = evth_->getRunNumber(); - int closest_run; - if (debug_) std::cout << "Check pbc_configs" << std::endl; - if(!bpc_configs_.empty()){ - for(auto run : bpc_configs_.items()){ - int check_run = std::stoi(run.key()); - if(check_run > run_number) - break; - else{ - closest_run = check_run; - } - } - beamPosCorrections_ = {bpc_configs_[std::to_string(closest_run)]["beamspot_x"], - bpc_configs_[std::to_string(closest_run)]["beamspot_y"], - bpc_configs_[std::to_string(closest_run)]["beamspot_z"]}; - } - - - //Get "true" values - //AP - double apMass = -0.9; - double apZ = -0.9; - double apEnergy = -0.9; - //Simp - double vdMass = -0.9; - double vdZ = -0.9; - double vdEnergy = -0.9; - - if (debug_) std::cout << "plot trigger info" << std::endl; - //Plot info about which trigger bits are present in the event - if (ts_ != nullptr) - { - _vtx_histos->Fill2DHisto("trig_count_hh", - ((int)ts_->prescaled.Single_3_Top)+((int)ts_->prescaled.Single_3_Bot), - ((int)ts_->prescaled.Single_2_Top)+((int)ts_->prescaled.Single_2_Bot)); - } - if (debug_) std::cout << "plot vtx N" << std::endl; - _vtx_histos->Fill1DHisto("n_vtx_h", vtxs_->size()); - - if (mcParts_) { - for(int 
i = 0; i < mcParts_->size(); i++) - { - if(mcParts_->at(i)->getPDG() == 622) - { - apMass = mcParts_->at(i)->getMass(); - apZ = mcParts_->at(i)->getVertexPosition().at(2); - apEnergy = mcParts_->at(i)->getEnergy(); - } - if(mcParts_->at(i)->getPDG() == 625) - { - vdMass = mcParts_->at(i)->getMass(); - vdZ = mcParts_->at(i)->getVertexPosition().at(2); - vdEnergy = mcParts_->at(i)->getEnergy(); - } - } - - if (!isData_) _mc_vtx_histos->FillMCParticles(mcParts_, analysis_); - } - //Store processed number of events - std::vector selected_vtxs; - bool passVtxPresel = false; - - if(debug_){ - std::cout<<"Number of vertices found in event: "<< vtxs_->size()<size(); i_vtx++ ) { - vtxSelector->getCutFlowHisto()->Fill(0.,weight); - - Vertex* vtx = vtxs_->at(i_vtx); - Particle* ele = nullptr; - Particle* pos = nullptr; - - //Trigger requirement - *really hate* having to do it here for each vertex. - - if (isData_) { - if (!vtxSelector->passCutEq("Pair1_eq",(int)evth_->isPair1Trigger(),weight)) - break; - if (!vtxSelector->passCutEq("Single0_eq",(int)evth_->isSingle0Trigger(),weight)) - break; - } - - bool foundParts = _ah->GetParticlesFromVtx(vtx,ele,pos); - if (!foundParts) { - if(debug_) std::cout<<"NewVertexAnaProcessor::WARNING::Found vtx without ele/pos. Skip."<getTrack(); - Track pos_trk = pos->getTrack(); - - if (debug_) { - std::cout<<"Check Ele/Pos Track momenta"<> anac_configs_; + anac_file.close(); + }*/ + + //Load Run Dependent V0 target projection fits from json + if(!v0ProjectionFitsCfg_.empty()){ + std::ifstream v0proj_file(v0ProjectionFitsCfg_); + v0proj_file >> v0proj_fits_; + v0proj_file.close(); } - // Beam Position Corrections - ele_trk.applyCorrection("z0", beamPosCorrections_.at(1)); - pos_trk.applyCorrection("z0", beamPosCorrections_.at(1)); - // Track Time Corrections - ele_trk.applyCorrection("track_time",eleTrackTimeBias_); - pos_trk.applyCorrection("track_time", posTrackTimeBias_); - - // Correct for the momentum bias - - if (biasingTool_) { - - // Correct for wrong track momentum - Bug Fix - // In case there was mis-configuration during reco/hpstr-ntuple step, correct - // the momentum magnitude here using the right bField for the data taking year - - if (bFieldScaleFactor_ > 0) { - biasingTool_->updateWithBiasP(ele_trk,bFieldScaleFactor_); - biasingTool_->updateWithBiasP(pos_trk,bFieldScaleFactor_); - } - - biasingTool_->updateWithBiasP(ele_trk); - biasingTool_->updateWithBiasP(pos_trk); + //Run Dependent Corrections + //Beam Position + if(!beamPosCfg_.empty()){ + std::ifstream bpc_file(beamPosCfg_); + bpc_file >> bpc_configs_; + bpc_file.close(); } - if (debug_) { - std::cout<<"Corrected Ele/Pos Track momenta"<> tbc_configs_; + tbc_file.close(); } - - double invm_smear = 1.; - if (smearingTool_) { - double unsmeared_prod = ele_trk.getP()*pos_trk.getP(); - smearingTool_->updateWithSmearP(ele_trk); - smearingTool_->updateWithSmearP(pos_trk); - double smeared_prod = ele_trk.getP()*pos_trk.getP(); - invm_smear = sqrt(smeared_prod/unsmeared_prod); - } - - //Add the momenta to the tracks - do not do that - //ele_trk.setMomentum(ele->getMomentum()[0],ele->getMomentum()[1],ele->getMomentum()[2]); - //pos_trk.setMomentum(pos->getMomentum()[0],pos->getMomentum()[1],pos->getMomentum()[2]); - double ele_E = ele->getEnergy(); - double pos_E = pos->getEnergy(); - if (debug_) std::cout << "got tracks" << std::endl; - CalCluster eleClus = ele->getCluster(); - CalCluster posClus = pos->getCluster(); + // histos = new MCAnaHistos(anaName_); + //histos->loadHistoConfig(histCfgFilename_) + 
//histos->DefineHistos(); + //histos->Define2DHistos(); + + + //For each region initialize plots + + for (unsigned int i_reg = 0; i_reg < regionSelections_.size(); i_reg++) { + std::string regname = AnaHelpers::getFileName(regionSelections_[i_reg],false); + std::cout<<"Setting up region:: " << regname <(anaName_+"_"+regname, regionSelections_[i_reg]); + _reg_vtx_selectors[regname]->setDebug(debug_); + _reg_vtx_selectors[regname]->LoadSelection(); + + _reg_vtx_histos[regname] = std::make_shared(anaName_+"_"+regname); + _reg_vtx_histos[regname]->loadHistoConfig(histoCfg_); + _reg_vtx_histos[regname]->DefineHistos(); + + + if(!isData_){ + _reg_mc_vtx_histos[regname] = std::make_shared(anaName_+"_mc_"+regname); + _reg_mc_vtx_histos[regname]->loadHistoConfig(mcHistoCfg_); + _reg_mc_vtx_histos[regname]->DefineHistos(); + } + + //Build a flat tuple for vertex and track params + if (makeFlatTuple_){ + _reg_tuples[regname] = std::make_shared(anaName_+"_"+regname+"_tree"); + + //vtx vars + _reg_tuples[regname]->addVariable("unc_vtx_mass"); + _reg_tuples[regname]->addVariable("unc_vtx_z"); + _reg_tuples[regname]->addVariable("unc_vtx_chi2"); + _reg_tuples[regname]->addVariable("unc_vtx_psum"); + _reg_tuples[regname]->addVariable("unc_vtx_px"); + _reg_tuples[regname]->addVariable("unc_vtx_py"); + _reg_tuples[regname]->addVariable("unc_vtx_pz"); + _reg_tuples[regname]->addVariable("unc_vtx_x"); + _reg_tuples[regname]->addVariable("unc_vtx_y"); + _reg_tuples[regname]->addVariable("unc_vtx_ele_pos_clus_dt"); + _reg_tuples[regname]->addVariable("run_number"); + _reg_tuples[regname]->addVariable("unc_vtx_cxx"); + _reg_tuples[regname]->addVariable("unc_vtx_cyy"); + _reg_tuples[regname]->addVariable("unc_vtx_czz"); + _reg_tuples[regname]->addVariable("unc_vtx_cyx"); + _reg_tuples[regname]->addVariable("unc_vtx_czy"); + _reg_tuples[regname]->addVariable("unc_vtx_czx"); + _reg_tuples[regname]->addVariable("unc_vtx_proj_x"); + _reg_tuples[regname]->addVariable("unc_vtx_proj_y"); + _reg_tuples[regname]->addVariable("unc_vtx_proj_x_sig"); + _reg_tuples[regname]->addVariable("unc_vtx_proj_y_sig"); + _reg_tuples[regname]->addVariable("unc_vtx_proj_sig"); + _reg_tuples[regname]->addVariable("unc_vtx_deltaZ"); + + + //track vars + _reg_tuples[regname]->addVariable("unc_vtx_ele_track_p"); + _reg_tuples[regname]->addVariable("unc_vtx_ele_track_t"); + _reg_tuples[regname]->addVariable("unc_vtx_ele_track_d0"); + _reg_tuples[regname]->addVariable("unc_vtx_ele_track_phi0"); + _reg_tuples[regname]->addVariable("unc_vtx_ele_track_omega"); + _reg_tuples[regname]->addVariable("unc_vtx_ele_track_tanLambda"); + _reg_tuples[regname]->addVariable("unc_vtx_ele_track_z0"); + _reg_tuples[regname]->addVariable("unc_vtx_ele_track_chi2ndf"); + _reg_tuples[regname]->addVariable("unc_vtx_ele_track_clust_dt"); + _reg_tuples[regname]->addVariable("unc_vtx_ele_track_z0Err"); + _reg_tuples[regname]->addVariable("unc_vtx_ele_track_d0Err"); + _reg_tuples[regname]->addVariable("unc_vtx_ele_track_tanLambdaErr"); + _reg_tuples[regname]->addVariable("unc_vtx_ele_track_PhiErr"); + _reg_tuples[regname]->addVariable("unc_vtx_ele_track_OmegaErr"); + _reg_tuples[regname]->addVariable("unc_vtx_ele_track_L1_isolation"); + _reg_tuples[regname]->addVariable("unc_vtx_ele_track_nhits"); + _reg_tuples[regname]->addVariable("unc_vtx_ele_track_lastlayer"); + _reg_tuples[regname]->addVariable("unc_vtx_ele_track_si0"); + _reg_tuples[regname]->addVariable("unc_vtx_ele_track_si1"); + _reg_tuples[regname]->addVariable("unc_vtx_ele_track_ecal_x"); + 
_reg_tuples[regname]->addVariable("unc_vtx_ele_track_ecal_y"); + _reg_tuples[regname]->addVariable("unc_vtx_ele_track_z"); + _reg_tuples[regname]->addVariable("unc_vtx_ele_track_px"); + _reg_tuples[regname]->addVariable("unc_vtx_ele_track_py"); + _reg_tuples[regname]->addVariable("unc_vtx_ele_track_pz"); + + _reg_tuples[regname]->addVariable("unc_vtx_pos_track_clust_dt"); + _reg_tuples[regname]->addVariable("unc_vtx_pos_track_p"); + _reg_tuples[regname]->addVariable("unc_vtx_pos_track_t"); + _reg_tuples[regname]->addVariable("unc_vtx_pos_track_d0"); + _reg_tuples[regname]->addVariable("unc_vtx_pos_track_phi0"); + _reg_tuples[regname]->addVariable("unc_vtx_pos_track_omega"); + _reg_tuples[regname]->addVariable("unc_vtx_pos_track_tanLambda"); + _reg_tuples[regname]->addVariable("unc_vtx_pos_track_z0"); + _reg_tuples[regname]->addVariable("unc_vtx_pos_track_chi2ndf"); + _reg_tuples[regname]->addVariable("unc_vtx_pos_track_z0Err"); + _reg_tuples[regname]->addVariable("unc_vtx_pos_track_d0Err"); + _reg_tuples[regname]->addVariable("unc_vtx_pos_track_tanLambdaErr"); + _reg_tuples[regname]->addVariable("unc_vtx_pos_track_PhiErr"); + _reg_tuples[regname]->addVariable("unc_vtx_pos_track_OmegaErr"); + _reg_tuples[regname]->addVariable("unc_vtx_pos_track_L1_isolation"); + _reg_tuples[regname]->addVariable("unc_vtx_pos_track_nhits"); + _reg_tuples[regname]->addVariable("unc_vtx_pos_track_lastlayer"); + _reg_tuples[regname]->addVariable("unc_vtx_pos_track_si0"); + _reg_tuples[regname]->addVariable("unc_vtx_pos_track_si1"); + _reg_tuples[regname]->addVariable("unc_vtx_pos_track_ecal_x"); + _reg_tuples[regname]->addVariable("unc_vtx_pos_track_ecal_y"); + _reg_tuples[regname]->addVariable("unc_vtx_pos_track_z"); + _reg_tuples[regname]->addVariable("unc_vtx_pos_track_px"); + _reg_tuples[regname]->addVariable("unc_vtx_pos_track_py"); + _reg_tuples[regname]->addVariable("unc_vtx_pos_track_pz"); + + //clust vars + _reg_tuples[regname]->addVariable("unc_vtx_ele_clust_E"); + _reg_tuples[regname]->addVariable("unc_vtx_ele_clust_x"); + _reg_tuples[regname]->addVariable("unc_vtx_ele_clust_corr_t"); + + _reg_tuples[regname]->addVariable("unc_vtx_pos_clust_E"); + _reg_tuples[regname]->addVariable("unc_vtx_pos_clust_x"); + _reg_tuples[regname]->addVariable("unc_vtx_pos_clust_corr_t"); + + if(!isData_) + { + _reg_tuples[regname]->addVariable("true_vtx_z"); + _reg_tuples[regname]->addVariable("true_vtx_mass"); + _reg_tuples[regname]->addVariable("ap_true_vtx_z"); + _reg_tuples[regname]->addVariable("ap_true_vtx_mass"); + _reg_tuples[regname]->addVariable("ap_true_vtx_energy"); + _reg_tuples[regname]->addVariable("vd_true_vtx_z"); + _reg_tuples[regname]->addVariable("vd_true_vtx_mass"); + _reg_tuples[regname]->addVariable("vd_true_vtx_energy"); + _reg_tuples[regname]->addVariable("hitCode"); + _reg_tuples[regname]->addVariable("L1hitCode"); + _reg_tuples[regname]->addVariable("L2hitCode"); + } + } + + _regions.push_back(regname); + } + // Get list of branches in tree to help protect accessing them + int nBr = tree_->GetListOfBranches()->GetEntries(); + if (debug_) std::cout << "Tree has " << nBr << " branches" << std::endl; + for(int iBr = 0; iBr < nBr; iBr++) + { + TBranch *br = dynamic_cast(tree_->GetListOfBranches()->At(iBr)); + brMap_.insert(std::map::value_type(br->GetName(), 1)); + if (debug_) std::cout << br->GetName() << ": " << brMap_[br->GetName()] << std::endl; + } - //Compute analysis variables here. 
- TLorentzVector p_ele; - p_ele.SetPxPyPzE(ele_trk.getMomentum()[0],ele_trk.getMomentum()[1],ele_trk.getMomentum()[2],ele->getEnergy()); - TLorentzVector p_pos; - p_pos.SetPxPyPzE(pos_trk.getMomentum()[0],pos_trk.getMomentum()[1],pos_trk.getMomentum()[2],ele->getEnergy()); + //init Reading Tree + tree_->SetBranchAddress("EventHeader", &evth_ , &bevth_); + if (brMap_.find(tsColl_.c_str()) != brMap_.end()) tree_->SetBranchAddress(tsColl_.c_str(), &ts_ , &bts_); + tree_->SetBranchAddress(vtxColl_.c_str(), &vtxs_ , &bvtxs_); + //tree_->SetBranchAddress(hitColl_.c_str(), &hits_ , &bhits_); + if (brMap_.find(hitColl_.c_str()) != brMap_.end()) tree_->SetBranchAddress(hitColl_.c_str(), &hits_ , &bhits_); + if(!isData_ && !mcColl_.empty()) tree_->SetBranchAddress(mcColl_.c_str() , &mcParts_, &bmcParts_); - //Tracks in opposite volumes - useless - //if (!vtxSelector->passCutLt("eleposTanLambaProd_lt",ele_trk.getTanLambda() * pos_trk.getTanLambda(),weight)) - // continue; + if (not pSmearingFile_.empty()) { + smearingTool_ = std::make_shared(pSmearingFile_,true, pSmearingSeed_); + } - if (debug_) std::cout << "start selection" << std::endl; - //Ele Track Time - if (!vtxSelector->passCutLt("eleTrkTime_lt",fabs(ele_trk.getTrackTime()),weight)) - continue; + if (not pBiasingFile_.empty()) { + biasingTool_ = std::make_shared(pBiasingFile_); + } - //Pos Track Time - if (!vtxSelector->passCutLt("posTrkTime_lt",fabs(pos_trk.getTrackTime()),weight)) - continue; +} - //Ele Track-cluster match - if (!vtxSelector->passCutLt("eleTrkCluMatch_lt",ele->getGoodnessOfPID(),weight)) - continue; +bool NewVertexAnaProcessor::process(IEvent* ievent) { + if(debug_) { + std:: cout << "----------------- Event " << evth_->getEventNumber() << " -----------------" << std::endl; + } + HpsEvent* hps_evt = (HpsEvent*) ievent; + double weight = 1.; + int run_number = evth_->getRunNumber(); + int closest_run; + if (debug_) std::cout << "Check pbc_configs" << std::endl; + if(!bpc_configs_.empty()){ + for(auto run : bpc_configs_.items()){ + int check_run = std::stoi(run.key()); + if(check_run > run_number) + break; + else{ + closest_run = check_run; + } + } + beamPosCorrections_ = {bpc_configs_[std::to_string(closest_run)]["unrotated_mean_x"], + bpc_configs_[std::to_string(closest_run)]["unrotated_mean_y"]}; + } - //Pos Track-cluster match - if (!vtxSelector->passCutLt("posTrkCluMatch_lt",pos->getGoodnessOfPID(),weight)) - continue; - - //Require Positron Cluster exists - if (!vtxSelector->passCutGt("posClusE_gt",posClus.getEnergy(),weight)) - continue; - - //Require Positron Cluster does NOT exists - if (!vtxSelector->passCutLt("posClusE_lt",posClus.getEnergy(),weight)) - continue; + //Load track parameter bias corrections if specified + if(!tbc_configs_.empty()){ + for(auto entry : tbc_configs_.items()){ + trackBiasCorrections_[entry.key()] = entry.value(); + } + } - double corr_eleClusterTime = ele->getCluster().getTime() - timeOffset_; - double corr_posClusterTime = pos->getCluster().getTime() - timeOffset_; + //Get "true" values + //AP + double apMass = -0.9; + double apZ = -0.9; + double apEnergy = -0.9; + //Simp + double vdMass = -0.9; + double vdZ = -0.9; + double vdEnergy = -0.9; + + if (debug_) std::cout << "plot trigger info" << std::endl; + //Plot info about which trigger bits are present in the event + if (ts_ != nullptr) + { + _vtx_histos->Fill2DHisto("trig_count_hh", + ((int)ts_->prescaled.Single_3_Top)+((int)ts_->prescaled.Single_3_Bot), + ((int)ts_->prescaled.Single_2_Top)+((int)ts_->prescaled.Single_2_Bot)); + } + 
if (debug_) std::cout << "plot vtx N" << std::endl; + _vtx_histos->Fill1DHisto("n_vtx_h", vtxs_->size()); + + if (mcParts_) { + for(int i = 0; i < mcParts_->size(); i++) + { + if(mcParts_->at(i)->getPDG() == 622) + { + apMass = mcParts_->at(i)->getMass(); + apZ = mcParts_->at(i)->getVertexPosition().at(2); + apEnergy = mcParts_->at(i)->getEnergy(); + } + if(mcParts_->at(i)->getPDG() == 625) + { + vdMass = mcParts_->at(i)->getMass(); + vdZ = mcParts_->at(i)->getVertexPosition().at(2); + vdEnergy = mcParts_->at(i)->getEnergy(); + } + } + + if (!isData_) _mc_vtx_histos->FillMCParticles(mcParts_, analysis_); + } + //Store processed number of events + std::vector selected_vtxs; + bool passVtxPresel = false; + + if(debug_){ + std::cout<<"Number of vertices found in event: "<< vtxs_->size()<size(); i_vtx++ ) { + vtxSelector->getCutFlowHisto()->Fill(0.,weight); + + Vertex* vtx = vtxs_->at(i_vtx); + Particle* ele = nullptr; + Particle* pos = nullptr; + + //Trigger requirement - *really hate* having to do it here for each vertex. + + if (isData_) { + if (!vtxSelector->passCutEq("Pair1_eq",(int)evth_->isPair1Trigger(),weight)) + break; + if (!vtxSelector->passCutEq("Single0_eq",(int)evth_->isSingle0Trigger(),weight)) + break; + } + + bool foundParts = _ah->GetParticlesFromVtx(vtx,ele,pos); + if (!foundParts) { + if(debug_) std::cout<<"NewVertexAnaProcessor::WARNING::Found vtx without ele/pos. Skip."<getTrack(); + Track pos_trk = pos->getTrack(); + + if (debug_) { + std::cout<<"Check Ele/Pos Track momenta"< 0) { + biasingTool_->updateWithBiasP(ele_trk,bFieldScaleFactor_); + biasingTool_->updateWithBiasP(pos_trk,bFieldScaleFactor_); + } + + biasingTool_->updateWithBiasP(ele_trk); + biasingTool_->updateWithBiasP(pos_trk); + } + + if (debug_) { + std::cout<<"Corrected Ele/Pos Track momenta"<updateWithSmearP(ele_trk); + double pos_smf = smearingTool_->updateWithSmearP(pos_trk); + double smeared_prod = ele_trk.getP()*pos_trk.getP(); + invm_smear = sqrt(smeared_prod/unsmeared_prod); + smearingTool_->updateVertexWithSmearP(vtx, ele_smf, pos_smf); + } + //std::cout << "[Before Preselection] ele track p after smearing: " << ele_trk.getP() << std::endl; + + + //Add the momenta to the tracks - do not do that + //ele_trk.setMomentum(ele->getMomentum()[0],ele->getMomentum()[1],ele->getMomentum()[2]); + //pos_trk.setMomentum(pos->getMomentum()[0],pos->getMomentum()[1],pos->getMomentum()[2]); + + double ele_E = ele->getEnergy(); + double pos_E = pos->getEnergy(); + if (debug_) std::cout << "got tracks" << std::endl; + + CalCluster eleClus = ele->getCluster(); + CalCluster posClus = pos->getCluster(); + + + //Compute analysis variables here. 
+ TLorentzVector p_ele; + p_ele.SetPxPyPzE(ele_trk.getMomentum()[0],ele_trk.getMomentum()[1],ele_trk.getMomentum()[2],ele->getEnergy()); + TLorentzVector p_pos; + p_pos.SetPxPyPzE(pos_trk.getMomentum()[0],pos_trk.getMomentum()[1],pos_trk.getMomentum()[2],ele->getEnergy()); + + //Tracks in opposite volumes - useless + //if (!vtxSelector->passCutLt("eleposTanLambaProd_lt",ele_trk.getTanLambda() * pos_trk.getTanLambda(),weight)) + // continue; + + if (debug_) std::cout << "start selection" << std::endl; + //Ele Track Time + if (!vtxSelector->passCutLt("eleTrkTime_lt",fabs(ele_trk.getTrackTime()),weight)) + continue; + + //Pos Track Time + if (!vtxSelector->passCutLt("posTrkTime_lt",fabs(pos_trk.getTrackTime()),weight)) + continue; + + //Ele Track-cluster match + if (!vtxSelector->passCutLt("eleTrkCluMatch_lt",ele->getGoodnessOfPID(),weight)) + continue; + + //Pos Track-cluster match + if (!vtxSelector->passCutLt("posTrkCluMatch_lt",pos->getGoodnessOfPID(),weight)) + continue; + + //Require Positron Cluster exists + if (!vtxSelector->passCutGt("posClusE_gt",posClus.getEnergy(),weight)) + continue; + + //Require Positron Cluster does NOT exists + if (!vtxSelector->passCutLt("posClusE_lt",posClus.getEnergy(),weight)) + continue; + + + double corr_eleClusterTime = ele->getCluster().getTime() - timeOffset_; + double corr_posClusterTime = pos->getCluster().getTime() - timeOffset_; + + double botClusTime = 0.0; + if(ele->getCluster().getPosition().at(1) < 0.0) botClusTime = ele->getCluster().getTime(); + else botClusTime = pos->getCluster().getTime(); + + //Bottom Cluster Time + if (!vtxSelector->passCutLt("botCluTime_lt", botClusTime, weight)) + continue; - double botClusTime = 0.0; - if(ele->getCluster().getPosition().at(1) < 0.0) botClusTime = ele->getCluster().getTime(); - else botClusTime = pos->getCluster().getTime(); + if (!vtxSelector->passCutGt("botCluTime_gt", botClusTime, weight)) + continue; - //Bottom Cluster Time - if (!vtxSelector->passCutLt("botCluTime_lt", botClusTime, weight)) - continue; + //Ele Pos Cluster Time Difference + if (!vtxSelector->passCutLt("eleposCluTimeDiff_lt",fabs(corr_eleClusterTime - corr_posClusterTime),weight)) + continue; - if (!vtxSelector->passCutGt("botCluTime_gt", botClusTime, weight)) - continue; + //Ele Track-Cluster Time Difference + if (!vtxSelector->passCutLt("eleTrkCluTimeDiff_lt",fabs(ele_trk.getTrackTime() - corr_eleClusterTime),weight)) + continue; - //Ele Pos Cluster Time Difference - if (!vtxSelector->passCutLt("eleposCluTimeDiff_lt",fabs(corr_eleClusterTime - corr_posClusterTime),weight)) - continue; + //Pos Track-Cluster Time Difference + if (!vtxSelector->passCutLt("posTrkCluTimeDiff_lt",fabs(pos_trk.getTrackTime() - corr_posClusterTime),weight)) + continue; - //Ele Track-Cluster Time Difference - if (!vtxSelector->passCutLt("eleTrkCluTimeDiff_lt",fabs(ele_trk.getTrackTime() - corr_eleClusterTime),weight)) - continue; + TVector3 ele_mom; + //ele_mom.SetX(ele->getMomentum()[0]); + //ele_mom.SetY(ele->getMomentum()[1]); + //ele_mom.SetZ(ele->getMomentum()[2]); + ele_mom.SetX(ele_trk.getMomentum()[0]); + ele_mom.SetY(ele_trk.getMomentum()[1]); + ele_mom.SetZ(ele_trk.getMomentum()[2]); + + + TVector3 pos_mom; + //pos_mom.SetX(pos->getMomentum()[0]); + //pos_mom.SetY(pos->getMomentum()[1]); + //pos_mom.SetZ(pos->getMomentum()[2]); + pos_mom.SetX(pos_trk.getMomentum()[0]); + pos_mom.SetY(pos_trk.getMomentum()[1]); + pos_mom.SetZ(pos_trk.getMomentum()[2]); + + + + //Ele Track Quality - Chi2 + if 
(!vtxSelector->passCutLt("eleTrkChi2_lt",ele_trk.getChi2(),weight)) + continue; + + //Pos Track Quality - Chi2 + if (!vtxSelector->passCutLt("posTrkChi2_lt",pos_trk.getChi2(),weight)) + continue; + + //Ele Track Quality - Chi2Ndf + if (!vtxSelector->passCutLt("eleTrkChi2Ndf_lt",ele_trk.getChi2Ndf(),weight)) + continue; + + //Pos Track Quality - Chi2Ndf + if (!vtxSelector->passCutLt("posTrkChi2Ndf_lt",pos_trk.getChi2Ndf(),weight)) + continue; + + //Beam Electron cut + if (!vtxSelector->passCutLt("eleMom_lt",ele_mom.Mag(),weight)) + continue; + + //Ele min momentum cut + if (!vtxSelector->passCutGt("eleMom_gt",ele_mom.Mag(),weight)) + continue; + + //Pos min momentum cut + if (!vtxSelector->passCutGt("posMom_gt",pos_mom.Mag(),weight)) + continue; + + //Ele nHits + int ele2dHits = ele_trk.getTrackerHitCount(); + if (!ele_trk.isKalmanTrack()) + ele2dHits*=2; + + if (!vtxSelector->passCutGt("eleN2Dhits_gt",ele2dHits,weight)) { + continue; + } + + //Pos nHits + int pos2dHits = pos_trk.getTrackerHitCount(); + if (!pos_trk.isKalmanTrack()) + pos2dHits*=2; + + if (!vtxSelector->passCutGt("posN2Dhits_gt",pos2dHits,weight)) { + continue; + } + + //Less than 4 shared hits for ele/pos track + if (!vtxSelector->passCutLt("eleNshared_lt",ele_trk.getNShared(),weight)) { + continue; + } + + if (!vtxSelector->passCutLt("posNshared_lt",pos_trk.getNShared(),weight)) { + continue; + } + + + //Vertex Quality + if (!vtxSelector->passCutLt("chi2unc_lt",vtx->getChi2(),weight)) + continue; + + //Max vtx momentum + if (!vtxSelector->passCutLt("maxVtxMom_lt",(ele_mom+pos_mom).Mag(),weight)) + continue; + + //Min vtx momentum + + if (!vtxSelector->passCutGt("minVtxMom_gt",(ele_mom+pos_mom).Mag(),weight)) + continue; + + if (debug_) std::cout << "fill 1D Vertex" << std::endl; + _vtx_histos->Fill1DVertex(vtx, + ele, + pos, + &ele_trk, + &pos_trk, + weight); + + if (debug_) std::cout << "fill track histos" << std::endl; + double ele_pos_dt = corr_eleClusterTime - corr_posClusterTime; + double psum = ele_mom.Mag()+pos_mom.Mag(); + + _vtx_histos->Fill1DTrack(&ele_trk,weight, "ele_"); + _vtx_histos->Fill1DTrack(&pos_trk,weight, "pos_"); + _vtx_histos->Fill1DHisto("ele_track_n2dhits_h", ele2dHits, weight); + _vtx_histos->Fill1DHisto("pos_track_n2dhits_h", pos2dHits, weight); + _vtx_histos->Fill1DHisto("vtx_Psum_h", p_ele.P()+p_pos.P(), weight); + _vtx_histos->Fill1DHisto("vtx_Esum_h", ele_E + pos_E, weight); + _vtx_histos->Fill1DHisto("vtx_smear_InvM_h", invm_smear*(vtx->getInvMass()), weight); + _vtx_histos->Fill1DHisto("ele_pos_clusTimeDiff_h", (corr_eleClusterTime - corr_posClusterTime), weight); + _vtx_histos->Fill2DHisto("ele_vtxZ_iso_hh", TMath::Min(ele_trk.getIsolation(0), ele_trk.getIsolation(1)), vtx->getZ(), weight); + _vtx_histos->Fill2DHisto("pos_vtxZ_iso_hh", TMath::Min(pos_trk.getIsolation(0), pos_trk.getIsolation(1)), vtx->getZ(), weight); + _vtx_histos->Fill2DHistograms(vtx,weight); + _vtx_histos->Fill2DTrack(&ele_trk,weight,"ele_"); + _vtx_histos->Fill2DTrack(&pos_trk,weight,"pos_"); + _vtx_histos->Fill1DHisto("mcMass622_h",apMass); + _vtx_histos->Fill1DHisto("mcZ622_h",apZ); + + //New SIMP histos for developing loose preselection cuts + //2d histos + _vtx_histos->Fill2DHisto("ele_clusT_v_ele_trackT_hh", ele_trk.getTrackTime(), corr_eleClusterTime, weight); + _vtx_histos->Fill2DHisto("pos_clusT_v_pos_trackT_hh", pos_trk.getTrackTime(), corr_posClusterTime, weight); + _vtx_histos->Fill2DHisto("ele_track_time_v_P_hh", ele_trk.getP(), ele_trk.getTrackTime(), weight); + 
_vtx_histos->Fill2DHisto("pos_track_time_v_P_hh", pos_trk.getP(), pos_trk.getTrackTime(), weight); + _vtx_histos->Fill2DHisto("ele_pos_clusTimeDiff_v_pSum_hh",ele_mom.Mag()+pos_mom.Mag(), ele_pos_dt, weight); + _vtx_histos->Fill2DHisto("ele_cluster_energy_v_track_p_hh",ele_trk.getP(), eleClus.getEnergy(), weight); + _vtx_histos->Fill2DHisto("pos_cluster_energy_v_track_p_hh",pos_trk.getP(), posClus.getEnergy(), weight); + _vtx_histos->Fill2DHisto("ele_track_cluster_dt_v_EoverP_hh",eleClus.getEnergy()/ele_trk.getP(), ele_trk.getTrackTime() - corr_eleClusterTime, weight); + _vtx_histos->Fill2DHisto("pos_track_cluster_dt_v_EoverP_hh",posClus.getEnergy()/pos_trk.getP(), pos_trk.getTrackTime() - corr_posClusterTime, weight); + _vtx_histos->Fill2DHisto("ele_track_clus_dt_v_p_hh",ele_trk.getP(), ele_trk.getTrackTime() - corr_eleClusterTime, weight); + _vtx_histos->Fill2DHisto("pos_track_clus_dt_v_p_hh",pos_trk.getP(), pos_trk.getTrackTime() - corr_posClusterTime, weight); + //chi2 2d plots + _vtx_histos->Fill2DHisto("ele_track_chi2ndf_v_time_hh", ele_trk.getTrackTime(), ele_trk.getChi2Ndf(), weight); + _vtx_histos->Fill2DHisto("ele_track_chi2ndf_v_p_hh", ele_trk.getP(), ele_trk.getChi2Ndf(), weight); + _vtx_histos->Fill2DHisto("ele_track_chi2ndf_v_tanlambda_hh", ele_trk.getTanLambda(), ele_trk.getChi2Ndf(), weight); + _vtx_histos->Fill2DHisto("ele_track_chi2ndf_v_n2dhits_hh", ele2dHits, ele_trk.getChi2Ndf(), weight); + + _vtx_histos->Fill2DHisto("pos_track_chi2ndf_v_time_hh", pos_trk.getTrackTime(), pos_trk.getChi2Ndf(), weight); + _vtx_histos->Fill2DHisto("pos_track_chi2ndf_v_p_hh", pos_trk.getP(), pos_trk.getChi2Ndf(), weight); + _vtx_histos->Fill2DHisto("pos_track_chi2ndf_v_tanlambda_hh", pos_trk.getTanLambda(), pos_trk.getChi2Ndf(), weight); + _vtx_histos->Fill2DHisto("pos_track_chi2ndf_v_n2dhits_hh", pos2dHits, pos_trk.getChi2Ndf(), weight); + + _vtx_histos->Fill2DHisto("ele_track_p_v_tanlambda_hh", ele_trk.getTanLambda(), ele_trk.getP(), weight); + _vtx_histos->Fill2DHisto("pos_track_p_v_tanlambda_hh", pos_trk.getTanLambda(), pos_trk.getP(), weight); + + + //1d histos + _vtx_histos->Fill1DHisto("ele_track_clus_dt_h", ele_trk.getTrackTime() - corr_eleClusterTime, weight); + _vtx_histos->Fill1DHisto("pos_track_clus_dt_h", pos_trk.getTrackTime() - corr_posClusterTime, weight); + + passVtxPresel = true; + + selected_vtxs.push_back(vtx); + vtxSelector->clearSelector(); + } + + // std::cout << "Number of selected vtxs: " << selected_vtxs.size() << std::endl; - //Pos Track-Cluster Time Difference - if (!vtxSelector->passCutLt("posTrkCluTimeDiff_lt",fabs(pos_trk.getTrackTime() - corr_posClusterTime),weight)) - continue; + _vtx_histos->Fill1DHisto("n_vertices_h",selected_vtxs.size()); - TVector3 ele_mom; - //ele_mom.SetX(ele->getMomentum()[0]); - //ele_mom.SetY(ele->getMomentum()[1]); - //ele_mom.SetZ(ele->getMomentum()[2]); - ele_mom.SetX(ele_trk.getMomentum()[0]); - ele_mom.SetY(ele_trk.getMomentum()[1]); - ele_mom.SetZ(ele_trk.getMomentum()[2]); - - - TVector3 pos_mom; - //pos_mom.SetX(pos->getMomentum()[0]); - //pos_mom.SetY(pos->getMomentum()[1]); - //pos_mom.SetZ(pos->getMomentum()[2]); - pos_mom.SetX(pos_trk.getMomentum()[0]); - pos_mom.SetY(pos_trk.getMomentum()[1]); - pos_mom.SetZ(pos_trk.getMomentum()[2]); - - - - //Ele Track Quality - Chi2 - if (!vtxSelector->passCutLt("eleTrkChi2_lt",ele_trk.getChi2(),weight)) - continue; - - //Pos Track Quality - Chi2 - if (!vtxSelector->passCutLt("posTrkChi2_lt",pos_trk.getChi2(),weight)) - continue; - - //Ele Track Quality - Chi2Ndf - if 
(!vtxSelector->passCutLt("eleTrkChi2Ndf_lt",ele_trk.getChi2Ndf(),weight)) - continue; - - //Pos Track Quality - Chi2Ndf - if (!vtxSelector->passCutLt("posTrkChi2Ndf_lt",pos_trk.getChi2Ndf(),weight)) - continue; - - //Beam Electron cut - if (!vtxSelector->passCutLt("eleMom_lt",ele_mom.Mag(),weight)) - continue; - - //Ele min momentum cut - if (!vtxSelector->passCutGt("eleMom_gt",ele_mom.Mag(),weight)) - continue; - - //Pos min momentum cut - if (!vtxSelector->passCutGt("posMom_gt",pos_mom.Mag(),weight)) - continue; - - //Ele nHits - int ele2dHits = ele_trk.getTrackerHitCount(); - if (!ele_trk.isKalmanTrack()) - ele2dHits*=2; - - if (!vtxSelector->passCutGt("eleN2Dhits_gt",ele2dHits,weight)) { - continue; - } - - //Pos nHits - int pos2dHits = pos_trk.getTrackerHitCount(); - if (!pos_trk.isKalmanTrack()) - pos2dHits*=2; - - if (!vtxSelector->passCutGt("posN2Dhits_gt",pos2dHits,weight)) { - continue; - } - - //Less than 4 shared hits for ele/pos track - if (!vtxSelector->passCutLt("eleNshared_lt",ele_trk.getNShared(),weight)) { - continue; - } - - if (!vtxSelector->passCutLt("posNshared_lt",pos_trk.getNShared(),weight)) { - continue; - } - - - //Vertex Quality - if (!vtxSelector->passCutLt("chi2unc_lt",vtx->getChi2(),weight)) - continue; - - //Max vtx momentum - if (!vtxSelector->passCutLt("maxVtxMom_lt",(ele_mom+pos_mom).Mag(),weight)) - continue; - - //Min vtx momentum - if (!vtxSelector->passCutGt("minVtxMom_gt",(ele_mom+pos_mom).Mag(),weight)) - continue; + //not working atm + //hps_evt->addVertexCollection("selected_vtxs", selected_vtxs); - if (debug_) std::cout << "fill 1D Vertex" << std::endl; - _vtx_histos->Fill1DVertex(vtx, - ele, - pos, - &ele_trk, - &pos_trk, - weight); + //Make Plots for each region: loop on each region. Check if the region has the cut and apply it + //TODO Clean this up => Cuts should be implemented in each region? 
+ //TODO Bring the preselection out of this stupid loop - if (debug_) std::cout << "fill track histos" << std::endl; - double ele_pos_dt = corr_eleClusterTime - corr_posClusterTime; - double psum = ele_mom.Mag()+pos_mom.Mag(); - _vtx_histos->Fill1DTrack(&ele_trk,weight, "ele_"); - _vtx_histos->Fill1DTrack(&pos_trk,weight, "pos_"); - _vtx_histos->Fill1DHisto("ele_track_n2dhits_h", ele2dHits, weight); - _vtx_histos->Fill1DHisto("pos_track_n2dhits_h", pos2dHits, weight); - _vtx_histos->Fill1DHisto("vtx_Psum_h", p_ele.P()+p_pos.P(), weight); - _vtx_histos->Fill1DHisto("vtx_Esum_h", ele_E + pos_E, weight); - _vtx_histos->Fill1DHisto("vtx_smear_InvM_h", invm_smear*(vtx->getInvMass()), weight); - _vtx_histos->Fill1DHisto("ele_pos_clusTimeDiff_h", (corr_eleClusterTime - corr_posClusterTime), weight); - _vtx_histos->Fill2DHisto("ele_vtxZ_iso_hh", TMath::Min(ele_trk.getIsolation(0), ele_trk.getIsolation(1)), vtx->getZ(), weight); - _vtx_histos->Fill2DHisto("pos_vtxZ_iso_hh", TMath::Min(pos_trk.getIsolation(0), pos_trk.getIsolation(1)), vtx->getZ(), weight); - _vtx_histos->Fill2DHistograms(vtx,weight); - _vtx_histos->Fill2DTrack(&ele_trk,weight,"ele_"); - _vtx_histos->Fill2DTrack(&pos_trk,weight,"pos_"); - _vtx_histos->Fill1DHisto("mcMass622_h",apMass); - _vtx_histos->Fill1DHisto("mcZ622_h",apZ); - //New SIMP histos for developing loose preselection cuts - //2d histos - _vtx_histos->Fill2DHisto("ele_clusT_v_ele_trackT_hh", ele_trk.getTrackTime(), corr_eleClusterTime, weight); - _vtx_histos->Fill2DHisto("pos_clusT_v_pos_trackT_hh", pos_trk.getTrackTime(), corr_posClusterTime, weight); - _vtx_histos->Fill2DHisto("ele_track_time_v_P_hh", ele_trk.getP(), ele_trk.getTrackTime(), weight); - _vtx_histos->Fill2DHisto("pos_track_time_v_P_hh", pos_trk.getP(), pos_trk.getTrackTime(), weight); - _vtx_histos->Fill2DHisto("ele_pos_clusTimeDiff_v_pSum_hh",ele_mom.Mag()+pos_mom.Mag(), ele_pos_dt, weight); - _vtx_histos->Fill2DHisto("ele_cluster_energy_v_track_p_hh",ele_trk.getP(), eleClus.getEnergy(), weight); - _vtx_histos->Fill2DHisto("pos_cluster_energy_v_track_p_hh",pos_trk.getP(), posClus.getEnergy(), weight); - _vtx_histos->Fill2DHisto("ele_track_cluster_dt_v_EoverP_hh",eleClus.getEnergy()/ele_trk.getP(), ele_trk.getTrackTime() - corr_eleClusterTime, weight); - _vtx_histos->Fill2DHisto("pos_track_cluster_dt_v_EoverP_hh",posClus.getEnergy()/pos_trk.getP(), pos_trk.getTrackTime() - corr_posClusterTime, weight); - _vtx_histos->Fill2DHisto("ele_track_clus_dt_v_p_hh",ele_trk.getP(), ele_trk.getTrackTime() - corr_eleClusterTime, weight); - _vtx_histos->Fill2DHisto("pos_track_clus_dt_v_p_hh",pos_trk.getP(), pos_trk.getTrackTime() - corr_posClusterTime, weight); - //chi2 2d plots - _vtx_histos->Fill2DHisto("ele_track_chi2ndf_v_time_hh", ele_trk.getTrackTime(), ele_trk.getChi2Ndf(), weight); - _vtx_histos->Fill2DHisto("ele_track_chi2ndf_v_p_hh", ele_trk.getP(), ele_trk.getChi2Ndf(), weight); - _vtx_histos->Fill2DHisto("ele_track_chi2ndf_v_tanlambda_hh", ele_trk.getTanLambda(), ele_trk.getChi2Ndf(), weight); - _vtx_histos->Fill2DHisto("ele_track_chi2ndf_v_n2dhits_hh", ele2dHits, ele_trk.getChi2Ndf(), weight); - - _vtx_histos->Fill2DHisto("pos_track_chi2ndf_v_time_hh", pos_trk.getTrackTime(), pos_trk.getChi2Ndf(), weight); - _vtx_histos->Fill2DHisto("pos_track_chi2ndf_v_p_hh", pos_trk.getP(), pos_trk.getChi2Ndf(), weight); - _vtx_histos->Fill2DHisto("pos_track_chi2ndf_v_tanlambda_hh", pos_trk.getTanLambda(), pos_trk.getChi2Ndf(), weight); - _vtx_histos->Fill2DHisto("pos_track_chi2ndf_v_n2dhits_hh", pos2dHits, 
pos_trk.getChi2Ndf(), weight); - - _vtx_histos->Fill2DHisto("ele_track_p_v_tanlambda_hh", ele_trk.getTanLambda(), ele_trk.getP(), weight); - _vtx_histos->Fill2DHisto("pos_track_p_v_tanlambda_hh", pos_trk.getTanLambda(), pos_trk.getP(), weight); - - - //1d histos - _vtx_histos->Fill1DHisto("ele_track_clus_dt_h", ele_trk.getTrackTime() - corr_eleClusterTime, weight); - _vtx_histos->Fill1DHisto("pos_track_clus_dt_h", pos_trk.getTrackTime() - corr_posClusterTime, weight); - - passVtxPresel = true; - - selected_vtxs.push_back(vtx); - vtxSelector->clearSelector(); - } - - // std::cout << "Number of selected vtxs: " << selected_vtxs.size() << std::endl; - - _vtx_histos->Fill1DHisto("n_vertices_h",selected_vtxs.size()); - - - //not working atm - //hps_evt->addVertexCollection("selected_vtxs", selected_vtxs); - - //Make Plots for each region: loop on each region. Check if the region has the cut and apply it - //TODO Clean this up => Cuts should be implemented in each region? - //TODO Bring the preselection out of this stupid loop - - - - if (debug_) std::cout << "start regions" << std::endl; - //TODO add yields. => Quite terrible way to loop. - for (auto region : _regions ) { - - int nGoodVtx = 0; - Vertex* goodVtx = nullptr; - std::vector goodVtxs; - - float truePsum = -1; - float trueEsum = -1; - - for ( auto vtx : selected_vtxs) { - - //No cuts. - _reg_vtx_selectors[region]->getCutFlowHisto()->Fill(0.,weight); - - - Particle* ele = nullptr; - Particle* pos = nullptr; - - _ah->GetParticlesFromVtx(vtx,ele,pos); - - CalCluster eleClus = ele->getCluster(); - CalCluster posClus = pos->getCluster(); - //vtx X position - if (!_reg_vtx_selectors[region]->passCutLt("uncVtxX_lt",fabs(vtx->getX()),weight)) - continue; - - //vtx Y position - if (!_reg_vtx_selectors[region]->passCutLt("uncVtxY_lt",fabs(vtx->getY()),weight)) - continue; - - //vtx Z position - if (!_reg_vtx_selectors[region]->passCutGt("uncVtxZ_gt",vtx->getZ(),weight)) - continue; - - double ele_E = ele->getEnergy(); - double pos_E = pos->getEnergy(); - - //Compute analysis variables here. 
- - Track ele_trk = ele->getTrack(); - Track pos_trk = pos->getTrack(); - - //Beam Position Corrections - ele_trk.applyCorrection("z0", beamPosCorrections_.at(1)); - pos_trk.applyCorrection("z0", beamPosCorrections_.at(1)); - //Track Time Corrections - ele_trk.applyCorrection("track_time",eleTrackTimeBias_); - pos_trk.applyCorrection("track_time", posTrackTimeBias_); - - if (biasingTool_) { - - //Correct the wrong Bfield first - if (bFieldScaleFactor_ > 0) { - biasingTool_->updateWithBiasP(ele_trk,bFieldScaleFactor_); - biasingTool_->updateWithBiasP(pos_trk,bFieldScaleFactor_); - } - - biasingTool_->updateWithBiasP(ele_trk); - biasingTool_->updateWithBiasP(pos_trk); - } - - double invm_smear = 1.; - if (smearingTool_) { - double unsmeared_prod = ele_trk.getP()*pos_trk.getP(); - smearingTool_->updateWithSmearP(ele_trk); - smearingTool_->updateWithSmearP(pos_trk); - double smeared_prod = ele_trk.getP()*pos_trk.getP(); - invm_smear = sqrt(smeared_prod/unsmeared_prod); - } - - //Add the momenta to the tracks - //ele_trk.setMomentum(ele->getMomentum()[0],ele->getMomentum()[1],ele->getMomentum()[2]); - //pos_trk.setMomentum(pos->getMomentum()[0],pos->getMomentum()[1],pos->getMomentum()[2]); - TVector3 recEleP(ele->getMomentum()[0],ele->getMomentum()[1],ele->getMomentum()[2]); - TLorentzVector p_ele; - p_ele.SetPxPyPzE(ele_trk.getMomentum()[0],ele_trk.getMomentum()[1],ele_trk.getMomentum()[2], ele_E); - TLorentzVector p_pos; - p_pos.SetPxPyPzE(pos_trk.getMomentum()[0],pos_trk.getMomentum()[1],pos_trk.getMomentum()[2], pos_E); - - //Get the layers hit on each track - std::vector ele_hit_layers = ele_trk.getHitLayers(); - int ele_Si0 = 0; - int ele_Si1 = 0; - int ele_lastlayer = 0; - for(int i=0; i pos_hit_layers = pos_trk.getHitLayers(); - int pos_Si0 = 0; - int pos_Si1 = 0; - int pos_lastlayer = 0; - for(int i=0; iInnermostLayerCheck(&ele_trk, foundL1ele, foundL2ele); - - - if (debug_) { - std::cout<<"Check on pos_Track"<InnermostLayerCheck(&pos_trk, foundL1pos, foundL2pos); - - if (debug_) { - std::cout<<"Check on pos_Track"<passCutEq("Pair1_eq",(int)evth_->isPair1Trigger(),weight)) - break; - } - //Ele Track Time - if (!_reg_vtx_selectors[region]->passCutLt("eleTrkTime_lt",fabs(ele_trk.getTrackTime()),weight)) - continue; - - //Pos Track Time - if (!_reg_vtx_selectors[region]->passCutLt("posTrkTime_lt",fabs(pos_trk.getTrackTime()),weight)) - continue; - - //Ele Track-cluster match - if (!_reg_vtx_selectors[region]->passCutLt("eleTrkCluMatch_lt",ele->getGoodnessOfPID(),weight)) - continue; - - //Pos Track-cluster match - if (!_reg_vtx_selectors[region]->passCutLt("posTrkCluMatch_lt",pos->getGoodnessOfPID(),weight)) - continue; - - //Require Positron Cluster exists - if (!_reg_vtx_selectors[region]->passCutGt("posClusE_gt",posClus.getEnergy(),weight)) - continue; - - //Require Positron Cluster does NOT exists - if (!_reg_vtx_selectors[region]->passCutLt("posClusE_lt",posClus.getEnergy(),weight)) - continue; - - double corr_eleClusterTime = ele->getCluster().getTime() - timeOffset_; - double corr_posClusterTime = pos->getCluster().getTime() - timeOffset_; - - double botClusTime = 0.0; - if(ele->getCluster().getPosition().at(1) < 0.0) botClusTime = ele->getCluster().getTime(); - else botClusTime = pos->getCluster().getTime(); - - //Bottom Cluster Time - if (!_reg_vtx_selectors[region]->passCutLt("botCluTime_lt", botClusTime, weight)) - continue; - - if (!_reg_vtx_selectors[region]->passCutGt("botCluTime_gt", botClusTime, weight)) - continue; - - //Ele Pos Cluster Time Difference - if 
(!_reg_vtx_selectors[region]->passCutLt("eleposCluTimeDiff_lt",fabs(corr_eleClusterTime - corr_posClusterTime),weight)) - continue; - - //Ele Track-Cluster Time Difference - if (!_reg_vtx_selectors[region]->passCutLt("eleTrkCluTimeDiff_lt",fabs(ele_trk.getTrackTime() - corr_eleClusterTime),weight)) - continue; - - //Pos Track-Cluster Time Difference - if (!_reg_vtx_selectors[region]->passCutLt("posTrkCluTimeDiff_lt",fabs(pos_trk.getTrackTime() - corr_posClusterTime),weight)) - continue; - - TVector3 ele_mom; - ele_mom.SetX(ele_trk.getMomentum()[0]); - ele_mom.SetY(ele_trk.getMomentum()[1]); - ele_mom.SetZ(ele_trk.getMomentum()[2]); - - - TVector3 pos_mom; - pos_mom.SetX(pos_trk.getMomentum()[0]); - pos_mom.SetY(pos_trk.getMomentum()[1]); - pos_mom.SetZ(pos_trk.getMomentum()[2]); - - //Ele Track Quality - Chi2 - if (!_reg_vtx_selectors[region]->passCutLt("eleTrkChi2_lt",ele_trk.getChi2(),weight)) - continue; + if (debug_) std::cout << "start regions" << std::endl; + //TODO add yields. => Quite terrible way to loop. + for (auto region : _regions ) { - //Pos Track Quality - Chi2 - if (!_reg_vtx_selectors[region]->passCutLt("posTrkChi2_lt",pos_trk.getChi2(),weight)) - continue; + int nGoodVtx = 0; + Vertex* goodVtx = nullptr; + std::vector goodVtxs; + + float truePsum = -1; + float trueEsum = -1; + + for ( auto vtx : selected_vtxs) { + + //No cuts. + _reg_vtx_selectors[region]->getCutFlowHisto()->Fill(0.,weight); + + + Particle* ele = nullptr; + Particle* pos = nullptr; + + _ah->GetParticlesFromVtx(vtx,ele,pos); + + CalCluster eleClus = ele->getCluster(); + CalCluster posClus = pos->getCluster(); + //vtx X position + if (!_reg_vtx_selectors[region]->passCutLt("uncVtxX_lt",fabs(vtx->getX()),weight)) + continue; + + //vtx Y position + if (!_reg_vtx_selectors[region]->passCutLt("uncVtxY_lt",fabs(vtx->getY()),weight)) + continue; + + //vtx Z position + if (!_reg_vtx_selectors[region]->passCutGt("uncVtxZ_gt",vtx->getZ(),weight)) + continue; + + double ele_E = ele->getEnergy(); + double pos_E = pos->getEnergy(); + + //Compute analysis variables here. 
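
Editorial note: in the lines that follow, the re-added region loop swaps the hard-coded z0 and track-time corrections (kept only as a commented-out block) for a single loop over a name-to-offset map, trackBiasCorrections_, so new bias corrections can be added without touching the processor. Below is a minimal sketch of that pattern with a stand-in Track type; the map contents are placeholders, not the values used in the analysis.

    #include <iostream>
    #include <map>
    #include <string>

    // Stand-in for the track object: it only needs a generic applyCorrection(name, delta).
    struct SketchTrack {
      double time = 12.0;   // ns, made-up starting values
      double z0   = 0.10;   // mm
      void applyCorrection(const std::string& name, double delta) {
        if (name == "track_time")    time += delta;
        else if (name == "track_z0") z0   += delta;
        // unknown names are ignored here; the real Track may support more quantities
      }
    };

    int main() {
      // Placeholder offsets; in the processor these come from configuration.
      std::map<std::string, double> trackBiasCorrections = {
          {"track_time", -5.0}, {"track_z0", -0.05}};

      SketchTrack ele, pos;
      // Same loop shape as the hunk below: apply every configured offset to both tracks.
      for (const auto& pair : trackBiasCorrections) {
        ele.applyCorrection(pair.first, pair.second);
        pos.applyCorrection(pair.first, pair.second);
      }
      std::cout << "ele time " << ele.time << " ns, z0 " << ele.z0 << " mm\n";
      return 0;
    }
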
+ + Track ele_trk = ele->getTrack(); + Track pos_trk = pos->getTrack(); + + //Apply Track Bias Corrections + for (const auto& pair : trackBiasCorrections_){ + ele_trk.applyCorrection(pair.first, pair.second); + pos_trk.applyCorrection(pair.first, pair.second); + } + + /* + //Beam Position Corrections + ele_trk.applyCorrection("z0", beamPosCorrections_.at(1)); + pos_trk.applyCorrection("z0", beamPosCorrections_.at(1)); + //Track Time Corrections + ele_trk.applyCorrection("track_time",eleTrackTimeBias_); + pos_trk.applyCorrection("track_time", posTrackTimeBias_); + */ + if (biasingTool_) { + + //Correct the wrong Bfield first + if (bFieldScaleFactor_ > 0) { + biasingTool_->updateWithBiasP(ele_trk,bFieldScaleFactor_); + biasingTool_->updateWithBiasP(pos_trk,bFieldScaleFactor_); + } + + biasingTool_->updateWithBiasP(ele_trk); + biasingTool_->updateWithBiasP(pos_trk); + } + + double invm_smear = 1.; + //std::cout << "[Region loop Vtxs] ele track p before smearing: " << ele_trk.getP() << std::endl; + if (smearingTool_) { + double unsmeared_prod = ele_trk.getP()*pos_trk.getP(); + double ele_smf = smearingTool_->updateWithSmearP(ele_trk); + double pos_smf = smearingTool_->updateWithSmearP(pos_trk); + double smeared_prod = ele_trk.getP()*pos_trk.getP(); + invm_smear = sqrt(smeared_prod/unsmeared_prod); + smearingTool_->updateVertexWithSmearP(vtx, ele_smf, pos_smf); + } + //std::cout << "[Region loop vtxs] ele track p after smearing: " << ele_trk.getP() << std::endl; + + //Add the momenta to the tracks + //ele_trk.setMomentum(ele->getMomentum()[0],ele->getMomentum()[1],ele->getMomentum()[2]); + //pos_trk.setMomentum(pos->getMomentum()[0],pos->getMomentum()[1],pos->getMomentum()[2]); + TVector3 recEleP(ele->getMomentum()[0],ele->getMomentum()[1],ele->getMomentum()[2]); + TLorentzVector p_ele; + p_ele.SetPxPyPzE(ele_trk.getMomentum()[0],ele_trk.getMomentum()[1],ele_trk.getMomentum()[2], ele_E); + TLorentzVector p_pos; + p_pos.SetPxPyPzE(pos_trk.getMomentum()[0],pos_trk.getMomentum()[1],pos_trk.getMomentum()[2], pos_E); + + //Get the layers hit on each track + std::vector ele_hit_layers = ele_trk.getHitLayers(); + int ele_Si0 = 0; + int ele_Si1 = 0; + int ele_lastlayer = 0; + for(int i=0; i pos_hit_layers = pos_trk.getHitLayers(); + int pos_Si0 = 0; + int pos_Si1 = 0; + int pos_lastlayer = 0; + for(int i=0; iInnermostLayerCheck(&ele_trk, foundL1ele, foundL2ele); + + + if (debug_) { + std::cout<<"Check on pos_Track"<InnermostLayerCheck(&pos_trk, foundL1pos, foundL2pos); + + if (debug_) { + std::cout<<"Check on pos_Track"<passCutEq("Pair1_eq",(int)evth_->isPair1Trigger(),weight)) + break; + } + //Ele Track Time + if (!_reg_vtx_selectors[region]->passCutLt("eleTrkTime_lt",fabs(ele_trk.getTrackTime()),weight)) + continue; + + //Pos Track Time + if (!_reg_vtx_selectors[region]->passCutLt("posTrkTime_lt",fabs(pos_trk.getTrackTime()),weight)) + continue; + + //Ele Track-cluster match + if (!_reg_vtx_selectors[region]->passCutLt("eleTrkCluMatch_lt",ele->getGoodnessOfPID(),weight)) + continue; + + //Pos Track-cluster match + if (!_reg_vtx_selectors[region]->passCutLt("posTrkCluMatch_lt",pos->getGoodnessOfPID(),weight)) + continue; + + //Require Positron Cluster exists + if (!_reg_vtx_selectors[region]->passCutGt("posClusE_gt",posClus.getEnergy(),weight)) + continue; + + //Require Positron Cluster does NOT exists + if (!_reg_vtx_selectors[region]->passCutLt("posClusE_lt",posClus.getEnergy(),weight)) + continue; + + double corr_eleClusterTime = ele->getCluster().getTime() - timeOffset_; + double 
corr_posClusterTime = pos->getCluster().getTime() - timeOffset_; + + double botClusTime = 0.0; + if(ele->getCluster().getPosition().at(1) < 0.0) botClusTime = ele->getCluster().getTime(); + else botClusTime = pos->getCluster().getTime(); + + //Bottom Cluster Time + if (!_reg_vtx_selectors[region]->passCutLt("botCluTime_lt", botClusTime, weight)) + continue; + + if (!_reg_vtx_selectors[region]->passCutGt("botCluTime_gt", botClusTime, weight)) + continue; + + //Ele Pos Cluster Time Difference + if (!_reg_vtx_selectors[region]->passCutLt("eleposCluTimeDiff_lt",fabs(corr_eleClusterTime - corr_posClusterTime),weight)) + continue; + + //Ele Track-Cluster Time Difference + if (!_reg_vtx_selectors[region]->passCutLt("eleTrkCluTimeDiff_lt",fabs(ele_trk.getTrackTime() - corr_eleClusterTime),weight)) + continue; + + //Pos Track-Cluster Time Difference + if (!_reg_vtx_selectors[region]->passCutLt("posTrkCluTimeDiff_lt",fabs(pos_trk.getTrackTime() - corr_posClusterTime),weight)) + continue; + + TVector3 ele_mom; + ele_mom.SetX(ele_trk.getMomentum()[0]); + ele_mom.SetY(ele_trk.getMomentum()[1]); + ele_mom.SetZ(ele_trk.getMomentum()[2]); + + + TVector3 pos_mom; + pos_mom.SetX(pos_trk.getMomentum()[0]); + pos_mom.SetY(pos_trk.getMomentum()[1]); + pos_mom.SetZ(pos_trk.getMomentum()[2]); + + //Ele Track Quality - Chi2 + if (!_reg_vtx_selectors[region]->passCutLt("eleTrkChi2_lt",ele_trk.getChi2(),weight)) + continue; - //Ele Track Quality - Chi2Ndf - if (!_reg_vtx_selectors[region]->passCutLt("eleTrkChi2Ndf_lt",ele_trk.getChi2Ndf(),weight)) - continue; - - //Pos Track Quality - Chi2Ndf - if (!_reg_vtx_selectors[region]->passCutLt("posTrkChi2Ndf_lt",pos_trk.getChi2Ndf(),weight)) - continue; - - //Beam Electron cut - if (!_reg_vtx_selectors[region]->passCutLt("eleMom_lt",ele_mom.Mag(),weight)) - continue; - - //Ele min momentum cut - if (!_reg_vtx_selectors[region]->passCutGt("eleMom_gt",ele_mom.Mag(),weight)) - continue; - - //Pos min momentum cut - if (!_reg_vtx_selectors[region]->passCutGt("posMom_gt",pos_mom.Mag(),weight)) - continue; - - //Ele nHits - int ele2dHits = ele_trk.getTrackerHitCount(); - if (!ele_trk.isKalmanTrack()) - ele2dHits*=2; - - if (!_reg_vtx_selectors[region]->passCutGt("eleN2Dhits_gt",ele2dHits,weight)) { - continue; - } - - //Pos nHits - int pos2dHits = pos_trk.getTrackerHitCount(); - if (!pos_trk.isKalmanTrack()) - pos2dHits*=2; + //Pos Track Quality - Chi2 + if (!_reg_vtx_selectors[region]->passCutLt("posTrkChi2_lt",pos_trk.getChi2(),weight)) + continue; - if (!_reg_vtx_selectors[region]->passCutGt("posN2Dhits_gt",pos2dHits,weight)) { - continue; - } - - //Less than 4 shared hits for ele/pos track - if (!_reg_vtx_selectors[region]->passCutLt("eleNshared_lt",ele_trk.getNShared(),weight)) { - continue; - } - - if (!_reg_vtx_selectors[region]->passCutLt("posNshared_lt",pos_trk.getNShared(),weight)) { - continue; - } - - //Vertex Quality - if (!_reg_vtx_selectors[region]->passCutLt("chi2unc_lt",vtx->getChi2(),weight)) - continue; - - //Max vtx momentum - if (!_reg_vtx_selectors[region]->passCutLt("maxVtxMom_lt",(ele_mom+pos_mom).Mag(),weight)) - continue; - - //Min vtx momentum - if (!_reg_vtx_selectors[region]->passCutGt("minVtxMom_gt",(ele_mom+pos_mom).Mag(),weight)) - continue; - - //END PRESELECTION CUTS - - //L1 requirement - if (!_reg_vtx_selectors[region]->passCutEq("L1Requirement_eq",(int)(foundL1ele&&foundL1pos),weight)) - continue; - - //L2 requirement - if (!_reg_vtx_selectors[region]->passCutEq("L2Requirement_eq",(int)(foundL2ele&&foundL2pos),weight)) - continue; - - //L1 
requirement for positron - if (!_reg_vtx_selectors[region]->passCutEq("L1PosReq_eq",(int)(foundL1pos),weight)) - continue; - - //ESum low cut - if (!_reg_vtx_selectors[region]->passCutLt("eSum_lt",(ele_E+pos_E),weight)) - continue; - - //ESum high cut - if (!_reg_vtx_selectors[region]->passCutGt("eSum_gt",(ele_E+pos_E),weight)) - continue; - - //PSum low cut - if (!_reg_vtx_selectors[region]->passCutLt("pSum_lt",(p_ele.P()+p_pos.P()),weight)) - continue; - - //PSum high cut - if (!_reg_vtx_selectors[region]->passCutGt("pSum_gt",(p_ele.P()+p_pos.P()),weight)) - continue; - - //Require Electron Cluster exists - if (!_reg_vtx_selectors[region]->passCutGt("eleClusE_gt",eleClus.getEnergy(),weight)) - continue; - - - //Require Electron Cluster does NOT exists - if (!_reg_vtx_selectors[region]->passCutLt("eleClusE_lt",eleClus.getEnergy(),weight)) - continue; - - //No shared hits requirement - if (!_reg_vtx_selectors[region]->passCutEq("ele_sharedL0_eq",(int)ele_trk.getSharedLy0(),weight)) - continue; - if (!_reg_vtx_selectors[region]->passCutEq("pos_sharedL0_eq",(int)pos_trk.getSharedLy0(),weight)) - continue; - if (!_reg_vtx_selectors[region]->passCutEq("ele_sharedL1_eq",(int)ele_trk.getSharedLy1(),weight)) - continue; - if (!_reg_vtx_selectors[region]->passCutEq("pos_sharedL1_eq",(int)pos_trk.getSharedLy1(),weight)) - continue; - - //Min vtx Y pos - if (!_reg_vtx_selectors[region]->passCutGt("VtxYPos_gt", vtx->getY(), weight)) - continue; - - //Max vtx Y pos - if (!_reg_vtx_selectors[region]->passCutLt("VtxYPos_lt", vtx->getY(), weight)) - continue; - - //Tracking Volume for positron - if (!_reg_vtx_selectors[region]->passCutGt("volPos_top", p_pos.Py(), weight)) - continue; - - if (!_reg_vtx_selectors[region]->passCutLt("volPos_bot", p_pos.Py(), weight)) - continue; - - if (!_reg_vtx_selectors[region]->passCutLt("deltaZ_lt", std::abs((ele_trk.getZ0()/ele_trk.getTanLambda()) - (pos_trk.getZ0()/pos_trk.getTanLambda())), weight)) - continue; - - //If this is MC check if MCParticle matched to the electron track is from rad or recoil - if(!isData_) - { - //Fill MC plots after all selections - _reg_mc_vtx_histos[region]->FillMCParticles(mcParts_, analysis_); - - //Count the number of hits per part on the ele track - std::map nHits4part; - for(int i =0; i < ele_trk.getMcpHits().size(); i++) - { - int partID = ele_trk.getMcpHits().at(i).second; - if ( nHits4part.find(partID) == nHits4part.end() ) - { - // not found - nHits4part[partID] = 1; - } - else - { - // found - nHits4part[partID]++; - } - } - - //Determine the MC part with the most hits on the track - int maxNHits = 0; - int maxID = 0; - for (std::map::iterator it=nHits4part.begin(); it!=nHits4part.end(); ++it) - { - if(it->second > maxNHits) - { - maxNHits = it->second; - maxID = it->first; - } - } - - //Find the correct mc part and grab mother id - int isRadEle = -999; - int isRecEle = -999; - - - trueEleP.SetXYZ(-999,-999,-999); - truePosP.SetXYZ(-999,-999,-999); - if (mcParts_) { - float trueEleE = -1; - float truePosE = -1; - for(int i = 0; i < mcParts_->size(); i++) - { - int momPDG = mcParts_->at(i)->getMomPDG(); - if(mcParts_->at(i)->getPDG() == 11 && momPDG == isRadPDG_) - { - std::vector lP = mcParts_->at(i)->getMomentum(); - trueEleP.SetXYZ(lP[0],lP[1],lP[2]); - trueEleE = mcParts_->at(i)->getEnergy(); - - } - if(mcParts_->at(i)->getPDG() == -11 && momPDG == isRadPDG_) - { - std::vector lP = mcParts_->at(i)->getMomentum(); - truePosP.SetXYZ(lP[0],lP[1],lP[2]); - truePosE = mcParts_->at(i)->getEnergy(); - - } - if(trueEleP.X() != -999 
&& truePosP.X() != -999){ - truePsum = trueEleP.Mag() + trueEleP.Mag(); - trueEsum = trueEleE + truePosE; - } - - if(mcParts_->at(i)->getID() != maxID) continue; - //Default isRadPDG = 622 - if(momPDG == isRadPDG_) isRadEle = 1; - if(momPDG == 623) isRecEle = 1; - } - } - double momRatio = recEleP.Mag() / trueEleP.Mag(); - double momAngle = trueEleP.Angle(recEleP) * TMath::RadToDeg(); - if (!_reg_vtx_selectors[region]->passCutLt("momRatio_lt", momRatio, weight)) continue; - if (!_reg_vtx_selectors[region]->passCutGt("momRatio_gt", momRatio, weight)) continue; - if (!_reg_vtx_selectors[region]->passCutLt("momAngle_lt", momAngle, weight)) continue; - - if (!_reg_vtx_selectors[region]->passCutEq("isRadEle_eq", isRadEle, weight)) continue; - if (!_reg_vtx_selectors[region]->passCutEq("isNotRadEle_eq", isRadEle, weight)) continue; - if (!_reg_vtx_selectors[region]->passCutEq("isRecEle_eq", isRecEle, weight)) continue; - } - - goodVtx = vtx; - nGoodVtx++; - goodVtxs.push_back(vtx); - } // selected vertices - - //N selected vertices - this is quite a silly cut to make at the end. But okay. that's how we decided atm. - if (!_reg_vtx_selectors[region]->passCutEq("nVtxs_eq", nGoodVtx, weight)) - continue; - //Move to after N vertices cut (was filled before) - _reg_vtx_histos[region]->Fill1DHisto("n_vertices_h", nGoodVtx, weight); - - //Loop over all selected vertices in the region - for(std::vector::iterator it = goodVtxs.begin(); it != goodVtxs.end(); it++){ - - Vertex* vtx = *it; - - Particle* ele = nullptr; - Particle* pos = nullptr; - - if (!vtx || !_ah->GetParticlesFromVtx(vtx,ele,pos)) - continue; - - CalCluster eleClus = ele->getCluster(); - CalCluster posClus = pos->getCluster(); - - double corr_eleClusterTime = ele->getCluster().getTime() - timeOffset_; - double corr_posClusterTime = pos->getCluster().getTime() - timeOffset_; - - double ele_E = ele->getEnergy(); - double pos_E = pos->getEnergy(); - - //Compute analysis variables here. 
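
Editorial note: the MC block removed above (and re-added later in this patch) truth-matches the electron track by a simple majority vote: count the sim hits each MCParticle contributes to the track, take the particle with the most hits, and classify it as radiative or recoil from its mother PDG (isRadPDG_, default 622, versus 623). A compact sketch of that vote follows, with hypothetical types standing in for the ntuple classes.

    #include <iostream>
    #include <map>
    #include <utility>
    #include <vector>

    int main() {
      // Hypothetical inputs: (layer, mcParticleID) pairs attached to the electron
      // track, and the mother PDG of each MC particle.
      std::vector<std::pair<int, int>> mcpHits = {{1, 42}, {2, 42}, {3, 42}, {4, 7}};
      std::map<int, int> motherPdgOfPart = {{42, 622}, {7, 623}};
      const int isRadPDG = 622;  // radiative mother; 623 tags the recoil in this analysis

      // Majority vote: count hits contributed by each MC particle.
      std::map<int, int> nHits4part;
      for (const auto& hit : mcpHits) nHits4part[hit.second]++;

      int maxID = 0, maxNHits = 0;
      for (const auto& kv : nHits4part)
        if (kv.second > maxNHits) { maxNHits = kv.second; maxID = kv.first; }

      bool isRadEle = (motherPdgOfPart[maxID] == isRadPDG);
      bool isRecEle = (motherPdgOfPart[maxID] == 623);
      std::cout << "matched MC id " << maxID << " (" << maxNHits << " hits), "
                << (isRadEle ? "radiative" : isRecEle ? "recoil" : "other") << "\n";
      return 0;
    }
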
- Track ele_trk = ele->getTrack(); - Track pos_trk = pos->getTrack(); - //Get the shared info - TODO change and improve - - //Track Time Corrections - ele_trk.applyCorrection("track_time",eleTrackTimeBias_); - pos_trk.applyCorrection("track_time", posTrackTimeBias_); - - - // Track Momentum bias - - if (biasingTool_) { - - // Correct for wrong track momentum - Bug Fix - // In case there was mis-configuration during reco/hpstr-ntuple step, correct - // the momentum magnitude here using the right bField for the data taking year - - if (bFieldScaleFactor_ > 0) { - biasingTool_->updateWithBiasP(ele_trk,bFieldScaleFactor_); - biasingTool_->updateWithBiasP(pos_trk,bFieldScaleFactor_); - } - - - biasingTool_->updateWithBiasP(ele_trk); - biasingTool_->updateWithBiasP(pos_trk); - } - - - double invm_smear = 1.; - if (smearingTool_) { - double unsmeared_prod = ele_trk.getP()*pos_trk.getP(); - smearingTool_->updateWithSmearP(ele_trk); - smearingTool_->updateWithSmearP(pos_trk); - double smeared_prod = ele_trk.getP()*pos_trk.getP(); - invm_smear = sqrt(smeared_prod/unsmeared_prod); - } - - //Get the layers hit on each track - std::vector ele_hit_layers = ele_trk.getHitLayers(); - int ele_Si0 = 0; - int ele_Si1 = 0; - int ele_lastlayer = 0; - for(int i=0; i pos_hit_layers = pos_trk.getHitLayers(); - int pos_Si0 = 0; - int pos_Si1 = 0; - int pos_lastlayer = 0; - for(int i=0; i vtx_cov = vtx->getCovariance(); - float cxx = vtx_cov.at(0); - float cyx = vtx_cov.at(1); - float cyy = vtx_cov.at(2); - float czx = vtx_cov.at(3); - float czy = vtx_cov.at(4); - float czz = vtx_cov.at(5); - - - //MC Truth hits in first 4 sensors - int L1L2hitCode = 0; //hit code '1111' means truth ax+ster hits in L1_ele, L1_pos, L2_ele, L2_pos - int L1hitCode = 0; //hit code '1111' means truth in L1_ele_ax, L1_ele_ster, L1_pos_ax, L1_pos_ster - int L2hitCode = 0; // hit code '1111' means truth in L2_ele_ax, L2_ele_ster, L2_pos_ax, L2_pos_ster - if(!isData_){ - //Get hit codes. Only sure this works for 2016 KF as is. 
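
Editorial note: the comments above describe L1L2hitCode as a four-flag code where '1111' means MC truth hits were found in L1_ele, L1_pos, L2_ele and L2_pos, and the selection passes (code - 0.5) to an "_lt" cut and (code + 0.5) to a "_gt" cut so that, with both thresholds set to the same integer C, exactly code == C survives. The sketch below assumes a plain 4-bit packing and plain </> comparisons against the thresholds; the real packing lives in utils::get2016KFMCTruthHitCodes and may differ (for example, concatenating decimal digits instead of bits).

    #include <iostream>

    // Assumed packing: one bit per requirement, so 0b1111 (= 15) means truth hits
    // were found for all four of L1_ele, L1_pos, L2_ele, L2_pos.
    int packHitCode(bool l1Ele, bool l1Pos, bool l2Ele, bool l2Pos) {
      return (l1Ele << 3) | (l1Pos << 2) | (l2Ele << 1) | (l2Pos << 0);
    }

    int main() {
      int code = packHitCode(true, true, true, false);  // missing the positron L2 truth hit
      // With hitCode_lt == hitCode_gt == C, the shifted comparisons keep only code == C.
      double cutC = 15.0;
      bool pass = ((code - 0.5) < cutC) && ((code + 0.5) > cutC);
      std::cout << "code " << code << (pass ? " passes" : " fails")
                << " against C = " << cutC << "\n";
      return 0;
    }
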
- utils::get2016KFMCTruthHitCodes(&ele_trk, &pos_trk, L1L2hitCode, L1hitCode, L2hitCode); - //L1L2 truth hit selection - if (!_reg_vtx_selectors[region]->passCutLt("hitCode_lt",((double)L1L2hitCode)-0.5, weight)) continue; - if (!_reg_vtx_selectors[region]->passCutGt("hitCode_gt",((double)L1L2hitCode)+0.5, weight)) continue; - //Fil hitcodes - _reg_vtx_histos[region]->Fill1DHisto("hitCode_h", L1L2hitCode,weight); - _reg_vtx_histos[region]->Fill1DHisto("L1hitCode_h", L1hitCode,weight); - _reg_vtx_histos[region]->Fill1DHisto("L2hitCode_h", L2hitCode,weight); - } - - //track isolations - //Only calculate isolations if both track L1 and L2 hits exist - bool hasL1ele = false; - bool hasL2ele = false; - _ah->InnermostLayerCheck(&ele_trk, hasL1ele, hasL2ele); - - bool hasL1pos = false; - bool hasL2pos = false; - _ah->InnermostLayerCheck(&pos_trk, hasL1pos, hasL2pos); - - TVector3 ele_mom; - //ele_mom.SetX(ele->getMomentum()[0]); - //ele_mom.SetY(ele->getMomentum()[1]); - //ele_mom.SetZ(ele->getMomentum()[2]); - ele_mom.SetX(ele_trk.getMomentum()[0]); - ele_mom.SetY(ele_trk.getMomentum()[1]); - ele_mom.SetZ(ele_trk.getMomentum()[2]); - - - TVector3 pos_mom; - //pos_mom.SetX(pos->getMomentum()[0]); - //pos_mom.SetY(pos->getMomentum()[1]); - //pos_mom.SetZ(pos->getMomentum()[2]); - pos_mom.SetX(pos_trk.getMomentum()[0]); - pos_mom.SetY(pos_trk.getMomentum()[1]); - pos_mom.SetZ(pos_trk.getMomentum()[2]); - - double ele_pos_dt = corr_eleClusterTime - corr_posClusterTime; - double psum = ele_mom.Mag()+pos_mom.Mag(); - - //Ele nHits - int ele2dHits = ele_trk.getTrackerHitCount(); - if (!ele_trk.isKalmanTrack()) - ele2dHits*=2; - - //pos nHits - int pos2dHits = pos_trk.getTrackerHitCount(); - - if(ts_ != nullptr) - { - _reg_vtx_histos[region]->Fill2DHisto("trig_count_hh", - ((int)ts_->prescaled.Single_3_Top)+((int)ts_->prescaled.Single_3_Bot), - ((int)ts_->prescaled.Single_2_Top)+((int)ts_->prescaled.Single_2_Bot)); - } - _reg_vtx_histos[region]->Fill1DHisto("n_vtx_h", vtxs_->size()); - - //Add the momenta to the tracks - //ele_trk.setMomentum(ele->getMomentum()[0],ele->getMomentum()[1],ele->getMomentum()[2]); - //pos_trk.setMomentum(pos->getMomentum()[0],pos->getMomentum()[1],pos->getMomentum()[2]); - TVector3 recEleP(ele->getMomentum()[0],ele->getMomentum()[1],ele->getMomentum()[2]); - TLorentzVector p_ele; - p_ele.SetPxPyPzE(ele_trk.getMomentum()[0],ele_trk.getMomentum()[1],ele_trk.getMomentum()[2], ele_E); - TLorentzVector p_pos; - p_pos.SetPxPyPzE(pos_trk.getMomentum()[0],pos_trk.getMomentum()[1],pos_trk.getMomentum()[2], pos_E); - - - _reg_vtx_histos[region]->Fill2DHistograms(vtx,weight); - _reg_vtx_histos[region]->Fill1DVertex(vtx, - ele, - pos, - &ele_trk, - &pos_trk, - weight); - - _reg_vtx_histos[region]->Fill1DHisto("ele_pos_clusTimeDiff_h", (corr_eleClusterTime - corr_posClusterTime), weight); - _reg_vtx_histos[region]->Fill1DHisto("ele_track_n2dhits_h", ele2dHits, weight); - _reg_vtx_histos[region]->Fill1DHisto("pos_track_n2dhits_h", pos2dHits, weight); - _reg_vtx_histos[region]->Fill1DHisto("vtx_Psum_h", p_ele.P()+p_pos.P(), weight); - _reg_vtx_histos[region]->Fill1DHisto("vtx_Esum_h", eleClus.getEnergy()+posClus.getEnergy(), weight); - _reg_vtx_histos[region]->Fill1DHisto("vtx_smear_InvM_h", invm_smear*(vtx->getInvMass()), weight); - _reg_vtx_histos[region]->Fill2DHisto("ele_vtxZ_iso_hh", TMath::Min(ele_trk.getIsolation(0), ele_trk.getIsolation(1)), vtx->getZ(), weight); - _reg_vtx_histos[region]->Fill2DHisto("pos_vtxZ_iso_hh", TMath::Min(pos_trk.getIsolation(0), pos_trk.getIsolation(1)), 
vtx->getZ(), weight); - _reg_vtx_histos[region]->Fill2DTrack(&ele_trk,weight,"ele_"); - _reg_vtx_histos[region]->Fill2DTrack(&pos_trk,weight,"pos_"); - _reg_vtx_histos[region]->Fill1DHisto("mcMass622_h",apMass); - _reg_vtx_histos[region]->Fill1DHisto("mcZ622_h",apZ); - _reg_vtx_histos[region]->Fill1DHisto("mcMass625_h",vdMass); - _reg_vtx_histos[region]->Fill1DHisto("mcZ625_h",vdZ); - - - //Just for the selected vertex - if(!isData_) - { - _reg_vtx_histos[region]->Fill2DHisto("vtx_Esum_vs_true_Esum_hh",eleClus.getEnergy()+posClus.getEnergy(), trueEsum, weight); - _reg_vtx_histos[region]->Fill2DHisto("vtx_Psum_vs_true_Psum_hh",p_ele.P()+p_pos.P(), truePsum, weight); - _reg_vtx_histos[region]->Fill1DHisto("true_vtx_psum_h",truePsum,weight); - } - - double reconz = vtx->getZ(); - double ele_trk_z0 = ele_trk.getZ0(); - double ele_trk_z0err = ele_trk.getZ0Err(); - double pos_trk_z0 = pos_trk.getZ0(); - double pos_trk_z0err = pos_trk.getZ0Err(); - - //DeltaZ - double deltaZ = std::abs( (ele_trk_z0/ele_trk.getTanLambda()) - (pos_trk_z0/pos_trk.getTanLambda()) ); - - //Project vertex to target - double vtx_proj_x = -999.9; - double vtx_proj_y = -999.9; - double vtx_proj_x_sig = -999.9; - double vtx_proj_y_sig = -999.9; - double vtx_proj_sig = -999.9; - if(!v0ProjectionFitsCfg_.empty()) - vtx_proj_sig = utils::v0_projection_to_target_significance(v0proj_fits_, evth_->getRunNumber(), - vtx_proj_x, vtx_proj_y, vtx_proj_x_sig, vtx_proj_y_sig, vtx->getX(), vtx->getY(), - reconz, vtx->getP().X(), vtx->getP().Y(), vtx->getP().Z()); - - _reg_vtx_histos[region]->Fill2DHisto("unc_vtx_x_v_unc_vtx_y_hh", vtx->getX(), vtx->getY()); - _reg_vtx_histos[region]->Fill2DHisto("unc_vtx_proj_x_v_unc_vtx_proj_y_hh", vtx_proj_x, vtx_proj_y); - _reg_vtx_histos[region]->Fill2DHisto("unc_vtx_proj_x_y_significance_hh", vtx_proj_x_sig, vtx_proj_y_sig); - _reg_vtx_histos[region]->Fill2DHisto("recon_z_v_vtx_proj_significance_hh", vtx_proj_sig, reconz); - _reg_vtx_histos[region]->Fill2DHisto("vtx_track_reconz_v_Z0err_hh", ele_trk_z0err, reconz); - _reg_vtx_histos[region]->Fill2DHisto("vtx_track_reconz_v_Z0err_hh", pos_trk_z0err, reconz); - _reg_vtx_histos[region]->Fill2DHisto("recon_z_v_z0_hh", ele_trk_z0, reconz); - _reg_vtx_histos[region]->Fill2DHisto("recon_z_v_z0_hh", pos_trk_z0, reconz); - _reg_vtx_histos[region]->Fill2DHisto("vtx_track_recon_z_v_ABSdz0tanlambda_hh", std::abs((ele_trk_z0/ele_trk.getTanLambda()) - (pos_trk_z0/pos_trk.getTanLambda())), reconz); - _reg_vtx_histos[region]->Fill2DHisto("vtx_track_recon_z_v_dz0tanlambda_hh", ((ele_trk_z0/ele_trk.getTanLambda()) - (pos_trk_z0/pos_trk.getTanLambda())), reconz); - - _reg_vtx_histos[region]->Fill2DHisto("recon_z_v_cxx_hh", cxx, reconz); - _reg_vtx_histos[region]->Fill2DHisto("recon_z_v_cyy_hh", cyy, reconz); - _reg_vtx_histos[region]->Fill2DHisto("recon_z_v_czz_hh", czz, reconz); - _reg_vtx_histos[region]->Fill2DHisto("recon_z_v_cyx_hh", cyx, reconz); - _reg_vtx_histos[region]->Fill2DHisto("recon_z_v_czx_hh", czx, reconz); - _reg_vtx_histos[region]->Fill2DHisto("recon_z_v_czy_hh", czy, reconz); - _reg_vtx_histos[region]->Fill1DHisto("cxx_h", cxx); - _reg_vtx_histos[region]->Fill1DHisto("cyy_h", cyy); - _reg_vtx_histos[region]->Fill1DHisto("czz_h", czz); - _reg_vtx_histos[region]->Fill1DHisto("cyx_h", cyx); - _reg_vtx_histos[region]->Fill1DHisto("czx_h", czx); - _reg_vtx_histos[region]->Fill1DHisto("czy_h", czy); - _reg_vtx_histos[region]->Fill2DHisto("vtx_track_recon_z_v_z0tanlambda_hh", ele_trk_z0/ele_trk.getTanLambda(), reconz); - 
_reg_vtx_histos[region]->Fill2DHisto("vtx_track_recon_z_v_z0tanlambda_hh", pos_trk_z0/pos_trk.getTanLambda(), reconz); - _reg_vtx_histos[region]->Fill2DHisto("ele_clusT_v_ele_trackT_hh", ele_trk.getTrackTime(), corr_eleClusterTime, weight); - _reg_vtx_histos[region]->Fill2DHisto("pos_clusT_v_pos_trackT_hh", pos_trk.getTrackTime(), corr_posClusterTime, weight); - _reg_vtx_histos[region]->Fill2DHisto("ele_track_time_v_P_hh", ele_trk.getP(), ele_trk.getTrackTime(), weight); - _reg_vtx_histos[region]->Fill2DHisto("pos_track_time_v_P_hh", pos_trk.getP(), pos_trk.getTrackTime(), weight); - _reg_vtx_histos[region]->Fill2DHisto("ele_pos_clusTimeDiff_v_pSum_hh",ele_mom.Mag()+pos_mom.Mag(), ele_pos_dt, weight); - _reg_vtx_histos[region]->Fill2DHisto("ele_cluster_energy_v_track_p_hh",ele_trk.getP(), eleClus.getEnergy(), weight); - _reg_vtx_histos[region]->Fill2DHisto("pos_cluster_energy_v_track_p_hh",pos_trk.getP(), posClus.getEnergy(), weight); - _reg_vtx_histos[region]->Fill2DHisto("ele_track_cluster_dt_v_EoverP_hh",eleClus.getEnergy()/ele_trk.getP(), ele_trk.getTrackTime() - corr_eleClusterTime, weight); - _reg_vtx_histos[region]->Fill2DHisto("pos_track_cluster_dt_v_EoverP_hh",posClus.getEnergy()/pos_trk.getP(), pos_trk.getTrackTime() - corr_posClusterTime, weight); - _reg_vtx_histos[region]->Fill2DHisto("ele_track_clus_dt_v_p_hh",ele_trk.getP(), ele_trk.getTrackTime() - corr_eleClusterTime, weight); - _reg_vtx_histos[region]->Fill2DHisto("pos_track_clus_dt_v_p_hh",pos_trk.getP(), pos_trk.getTrackTime() - corr_posClusterTime, weight); - _reg_vtx_histos[region]->Fill2DHisto("ele_z0_vs_pos_z0_hh",ele_trk.getZ0(), pos_trk.getZ0(), weight); - - //chi2 2d plots - _reg_vtx_histos[region]->Fill2DHisto("ele_track_chi2ndf_v_time_hh", ele_trk.getTrackTime(), ele_trk.getChi2Ndf(), weight); - _reg_vtx_histos[region]->Fill2DHisto("ele_track_chi2ndf_v_p_hh", ele_trk.getP(), ele_trk.getChi2Ndf(), weight); - _reg_vtx_histos[region]->Fill2DHisto("ele_track_chi2ndf_v_tanlambda_hh", ele_trk.getTanLambda(), ele_trk.getChi2Ndf(), weight); - _reg_vtx_histos[region]->Fill2DHisto("ele_track_chi2ndf_v_n2dhits_hh", ele2dHits, ele_trk.getChi2Ndf(), weight); - - _reg_vtx_histos[region]->Fill2DHisto("pos_track_chi2ndf_v_time_hh", pos_trk.getTrackTime(), pos_trk.getChi2Ndf(), weight); - _reg_vtx_histos[region]->Fill2DHisto("pos_track_chi2ndf_v_p_hh", pos_trk.getP(), pos_trk.getChi2Ndf(), weight); - _reg_vtx_histos[region]->Fill2DHisto("pos_track_chi2ndf_v_tanlambda_hh", pos_trk.getTanLambda(), pos_trk.getChi2Ndf(), weight); - _reg_vtx_histos[region]->Fill2DHisto("pos_track_chi2ndf_v_n2dhits_hh", pos2dHits, pos_trk.getChi2Ndf(), weight); - - _reg_vtx_histos[region]->Fill2DHisto("ele_track_p_v_tanlambda_hh", ele_trk.getTanLambda(), ele_trk.getP(), weight); - _reg_vtx_histos[region]->Fill2DHisto("pos_track_p_v_tanlambda_hh", pos_trk.getTanLambda(), pos_trk.getP(), weight); - - - //1d histos - _reg_vtx_histos[region]->Fill1DHisto("ele_track_clus_dt_h", ele_trk.getTrackTime() - corr_eleClusterTime, weight); - _reg_vtx_histos[region]->Fill1DHisto("pos_track_clus_dt_h", pos_trk.getTrackTime() - corr_posClusterTime, weight); - - - //TODO put this in the Vertex! 
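
Editorial note: the block above computes deltaZ = |z0_e/tanLambda_e - z0_p/tanLambda_p| before filling the recon-z histograms and the target-projection significance. As I read the helix parameters, z0/tanLambda extrapolates each track back to the beam plane and so estimates where along the beamline it originated; deltaZ then measures how consistent the electron and positron are with a common origin. A minimal sketch, with made-up parameter values:

    #include <cmath>
    #include <iostream>

    // z0 / tanLambda extrapolates a track's impact parameter back to the beam plane,
    // giving (up to sign conventions) an estimate of its production point along the
    // beamline; deltaZ compares that estimate for the two legs of the vertex.
    double deltaZ(double eleZ0, double eleTanLambda, double posZ0, double posTanLambda) {
      return std::abs(eleZ0 / eleTanLambda - posZ0 / posTanLambda);
    }

    int main() {
      // Hypothetical helix parameters (mm for z0, unitless slope for tanLambda).
      double dz = deltaZ(0.08, 0.03, -0.05, -0.028);
      std::cout << "deltaZ = " << dz << " mm\n";   // small values favour a common origin
      return 0;
    }
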
- TVector3 vtxPosSvt; - vtxPosSvt.SetX(vtx->getX()); - vtxPosSvt.SetY(vtx->getY()); - vtxPosSvt.SetZ(vtx->getZ()); - vtxPosSvt.RotateY(-0.0305); - - //Just for the selected vertex - if (makeFlatTuple_){ - if(!isData_){ - _reg_tuples[region]->setVariableValue("ap_true_vtx_z", apZ); - _reg_tuples[region]->setVariableValue("ap_true_vtx_mass", apMass); - _reg_tuples[region]->setVariableValue("ap_true_vtx_energy", apEnergy); - _reg_tuples[region]->setVariableValue("vd_true_vtx_z", vdZ); - _reg_tuples[region]->setVariableValue("vd_true_vtx_mass", vdMass); - _reg_tuples[region]->setVariableValue("vd_true_vtx_energy", vdEnergy); - _reg_tuples[region]->setVariableValue("hitCode", float(L1L2hitCode)); - _reg_tuples[region]->setVariableValue("L1hitCode", float(L1hitCode)); - _reg_tuples[region]->setVariableValue("L2hitCode", float(L2hitCode)); - } - - _reg_tuples[region]->setVariableValue("unc_vtx_mass", vtx->getInvMass()); - _reg_tuples[region]->setVariableValue("unc_vtx_z" , vtxPosSvt.Z()); - _reg_tuples[region]->setVariableValue("unc_vtx_chi2", vtx->getChi2()); - _reg_tuples[region]->setVariableValue("unc_vtx_psum", p_ele.P()+p_pos.P()); - _reg_tuples[region]->setVariableValue("unc_vtx_px", vtx->getP().X()); - _reg_tuples[region]->setVariableValue("unc_vtx_py", vtx->getP().Y()); - _reg_tuples[region]->setVariableValue("unc_vtx_pz", vtx->getP().Z()); - _reg_tuples[region]->setVariableValue("unc_vtx_x", vtx->getX()); - _reg_tuples[region]->setVariableValue("unc_vtx_y", vtx->getY()); - _reg_tuples[region]->setVariableValue("unc_vtx_proj_x", vtx_proj_x); - _reg_tuples[region]->setVariableValue("unc_vtx_proj_y", vtx_proj_y); - _reg_tuples[region]->setVariableValue("unc_vtx_proj_x_sig", vtx_proj_x_sig); - _reg_tuples[region]->setVariableValue("unc_vtx_proj_y_sig", vtx_proj_y_sig); - _reg_tuples[region]->setVariableValue("unc_vtx_proj_sig", vtx_proj_sig); - _reg_tuples[region]->setVariableValue("unc_vtx_ele_pos_clust_dt", corr_eleClusterTime - corr_posClusterTime); - - _reg_tuples[region]->setVariableValue("unc_vtx_cxx", cxx); - _reg_tuples[region]->setVariableValue("unc_vtx_cyy", cyy); - _reg_tuples[region]->setVariableValue("unc_vtx_czz", czz); - _reg_tuples[region]->setVariableValue("unc_vtx_cyx", cyx); - _reg_tuples[region]->setVariableValue("unc_vtx_czy", czy); - _reg_tuples[region]->setVariableValue("unc_vtx_czx", czx); - _reg_tuples[region]->setVariableValue("unc_vtx_deltaZ", deltaZ); - - //track vars - _reg_tuples[region]->setVariableValue("unc_vtx_ele_track_p", ele_trk.getP()); - _reg_tuples[region]->setVariableValue("unc_vtx_ele_track_t", ele_trk.getTrackTime()); - _reg_tuples[region]->setVariableValue("unc_vtx_ele_track_d0", ele_trk.getD0()); - _reg_tuples[region]->setVariableValue("unc_vtx_ele_track_phi0", ele_trk.getPhi()); - _reg_tuples[region]->setVariableValue("unc_vtx_ele_track_omega", ele_trk.getOmega()); - _reg_tuples[region]->setVariableValue("unc_vtx_ele_track_tanLambda", ele_trk.getTanLambda()); - _reg_tuples[region]->setVariableValue("unc_vtx_ele_track_z0", ele_trk.getZ0()); - _reg_tuples[region]->setVariableValue("unc_vtx_ele_track_chi2ndf", ele_trk.getChi2Ndf()); - _reg_tuples[region]->setVariableValue("unc_vtx_ele_track_clust_dt", ele_trk.getTrackTime() - corr_eleClusterTime); - _reg_tuples[region]->setVariableValue("unc_vtx_ele_track_z0Err",ele_trk.getZ0Err()); - _reg_tuples[region]->setVariableValue("unc_vtx_ele_track_d0Err", ele_trk.getD0Err()); - _reg_tuples[region]->setVariableValue("unc_vtx_ele_track_tanLambdaErr", ele_trk.getTanLambdaErr()); - 
_reg_tuples[region]->setVariableValue("unc_vtx_ele_track_PhiErr", ele_trk.getPhiErr()); - _reg_tuples[region]->setVariableValue("unc_vtx_ele_track_OmegaErr", ele_trk.getOmegaErr()); - _reg_tuples[region]->setVariableValue("unc_vtx_ele_track_nhits",ele2dHits); - _reg_tuples[region]->setVariableValue("unc_vtx_ele_track_lastlayer",ele_lastlayer); - _reg_tuples[region]->setVariableValue("unc_vtx_ele_track_si0",ele_Si0); - _reg_tuples[region]->setVariableValue("unc_vtx_ele_track_si1",ele_Si1); - - _reg_tuples[region]->setVariableValue("unc_vtx_pos_track_p", pos_trk.getP()); - _reg_tuples[region]->setVariableValue("unc_vtx_pos_track_t", pos_trk.getTrackTime()); - _reg_tuples[region]->setVariableValue("unc_vtx_pos_track_d0", pos_trk.getD0()); - _reg_tuples[region]->setVariableValue("unc_vtx_pos_track_phi0", pos_trk.getPhi()); - _reg_tuples[region]->setVariableValue("unc_vtx_pos_track_omega", pos_trk.getOmega()); - _reg_tuples[region]->setVariableValue("unc_vtx_pos_track_tanLambda", pos_trk.getTanLambda()); - _reg_tuples[region]->setVariableValue("unc_vtx_pos_track_z0", pos_trk.getZ0()); - _reg_tuples[region]->setVariableValue("unc_vtx_pos_track_chi2ndf", pos_trk.getChi2Ndf()); - _reg_tuples[region]->setVariableValue("unc_vtx_pos_track_clust_dt", pos_trk.getTrackTime() - corr_posClusterTime); - _reg_tuples[region]->setVariableValue("unc_vtx_pos_track_z0Err",pos_trk.getZ0Err()); - _reg_tuples[region]->setVariableValue("unc_vtx_pos_track_d0Err", pos_trk.getD0Err()); - _reg_tuples[region]->setVariableValue("unc_vtx_pos_track_tanLambdaErr", pos_trk.getTanLambdaErr()); - _reg_tuples[region]->setVariableValue("unc_vtx_pos_track_PhiErr", pos_trk.getPhiErr()); - _reg_tuples[region]->setVariableValue("unc_vtx_pos_track_OmegaErr", pos_trk.getOmegaErr()); - _reg_tuples[region]->setVariableValue("unc_vtx_pos_track_nhits",pos2dHits); - _reg_tuples[region]->setVariableValue("unc_vtx_pos_track_lastlayer",pos_lastlayer); - _reg_tuples[region]->setVariableValue("unc_vtx_pos_track_si0",pos_Si0); - _reg_tuples[region]->setVariableValue("unc_vtx_pos_track_si1",pos_Si1); - - //clust vars - _reg_tuples[region]->setVariableValue("unc_vtx_ele_clust_E", eleClus.getEnergy()); - _reg_tuples[region]->setVariableValue("unc_vtx_ele_clust_x", eleClus.getPosition().at(0)); - _reg_tuples[region]->setVariableValue("unc_vtx_ele_clust_corr_t",corr_eleClusterTime); - - _reg_tuples[region]->setVariableValue("unc_vtx_pos_clust_E", posClus.getEnergy()); - _reg_tuples[region]->setVariableValue("unc_vtx_pos_clust_x", posClus.getPosition().at(0)); - _reg_tuples[region]->setVariableValue("unc_vtx_pos_clust_corr_t",corr_posClusterTime); - _reg_tuples[region]->setVariableValue("run_number", evth_->getRunNumber()); - - _reg_tuples[region]->setVariableValue("unc_vtx_ele_track_ecal_x", ele_trk.getPositionAtEcal().at(0)); - _reg_tuples[region]->setVariableValue("unc_vtx_ele_track_ecal_y", ele_trk.getPositionAtEcal().at(1)); - _reg_tuples[region]->setVariableValue("unc_vtx_ele_track_z", ele_trk.getPosition().at(2)); - _reg_tuples[region]->setVariableValue("unc_vtx_pos_track_ecal_x", pos_trk.getPositionAtEcal().at(0)); - _reg_tuples[region]->setVariableValue("unc_vtx_pos_track_ecal_y", pos_trk.getPositionAtEcal().at(1)); - _reg_tuples[region]->setVariableValue("unc_vtx_pos_track_z", pos_trk.getPosition().at(2)); - _reg_tuples[region]->setVariableValue("unc_vtx_ele_track_px", ele_trk.getMomentum().at(0)); - _reg_tuples[region]->setVariableValue("unc_vtx_ele_track_py", ele_trk.getMomentum().at(1)); - 
_reg_tuples[region]->setVariableValue("unc_vtx_ele_track_pz", ele_trk.getMomentum().at(2)); - _reg_tuples[region]->setVariableValue("unc_vtx_pos_track_px", pos_trk.getMomentum().at(0)); - _reg_tuples[region]->setVariableValue("unc_vtx_pos_track_py", pos_trk.getMomentum().at(1)); - _reg_tuples[region]->setVariableValue("unc_vtx_pos_track_pz", pos_trk.getMomentum().at(2)); - - _reg_tuples[region]->fill(); - } - } - - }// regions - - return true; + //Ele Track Quality - Chi2Ndf + if (!_reg_vtx_selectors[region]->passCutLt("eleTrkChi2Ndf_lt",ele_trk.getChi2Ndf(),weight)) + continue; + + //Pos Track Quality - Chi2Ndf + if (!_reg_vtx_selectors[region]->passCutLt("posTrkChi2Ndf_lt",pos_trk.getChi2Ndf(),weight)) + continue; + + //Beam Electron cut + if (!_reg_vtx_selectors[region]->passCutLt("eleMom_lt",ele_mom.Mag(),weight)) + continue; + + //Ele min momentum cut + if (!_reg_vtx_selectors[region]->passCutGt("eleMom_gt",ele_mom.Mag(),weight)) + continue; + + //Pos min momentum cut + if (!_reg_vtx_selectors[region]->passCutGt("posMom_gt",pos_mom.Mag(),weight)) + continue; + + //Ele nHits + int ele2dHits = ele_trk.getTrackerHitCount(); + if (!ele_trk.isKalmanTrack()) + ele2dHits*=2; + + if (!_reg_vtx_selectors[region]->passCutGt("eleN2Dhits_gt",ele2dHits,weight)) { + continue; + } + + //Pos nHits + int pos2dHits = pos_trk.getTrackerHitCount(); + if (!pos_trk.isKalmanTrack()) + pos2dHits*=2; + + if (!_reg_vtx_selectors[region]->passCutGt("posN2Dhits_gt",pos2dHits,weight)) { + continue; + } + + //Less than 4 shared hits for ele/pos track + if (!_reg_vtx_selectors[region]->passCutLt("eleNshared_lt",ele_trk.getNShared(),weight)) { + continue; + } + + if (!_reg_vtx_selectors[region]->passCutLt("posNshared_lt",pos_trk.getNShared(),weight)) { + continue; + } + + //Vertex Quality + if (!_reg_vtx_selectors[region]->passCutLt("chi2unc_lt",vtx->getChi2(),weight)) + continue; + + //Max vtx momentum + if (!_reg_vtx_selectors[region]->passCutLt("maxVtxMom_lt",(ele_mom+pos_mom).Mag(),weight)) + continue; + + //Min vtx momentum + if (!_reg_vtx_selectors[region]->passCutGt("minVtxMom_gt",(ele_mom+pos_mom).Mag(),weight)) + continue; + + //END PRESELECTION CUTS + + //L1 requirement + if (!_reg_vtx_selectors[region]->passCutEq("L1Requirement_eq",(int)(foundL1ele&&foundL1pos),weight)) + continue; + + //L2 requirement + if (!_reg_vtx_selectors[region]->passCutEq("L2Requirement_eq",(int)(foundL2ele&&foundL2pos),weight)) + continue; + + //L1 requirement for positron + if (!_reg_vtx_selectors[region]->passCutEq("L1PosReq_eq",(int)(foundL1pos),weight)) + continue; + + //ESum low cut + if (!_reg_vtx_selectors[region]->passCutLt("eSum_lt",(ele_E+pos_E),weight)) + continue; + + //ESum high cut + if (!_reg_vtx_selectors[region]->passCutGt("eSum_gt",(ele_E+pos_E),weight)) + continue; + + //PSum low cut + if (!_reg_vtx_selectors[region]->passCutLt("pSum_lt",(p_ele.P()+p_pos.P()),weight)) + continue; + + //PSum high cut + if (!_reg_vtx_selectors[region]->passCutGt("pSum_gt",(p_ele.P()+p_pos.P()),weight)) + continue; + + //Require Electron Cluster exists + if (!_reg_vtx_selectors[region]->passCutGt("eleClusE_gt",eleClus.getEnergy(),weight)) + continue; + + + //Require Electron Cluster does NOT exists + if (!_reg_vtx_selectors[region]->passCutLt("eleClusE_lt",eleClus.getEnergy(),weight)) + continue; + + //No shared hits requirement + if (!_reg_vtx_selectors[region]->passCutEq("ele_sharedL0_eq",(int)ele_trk.getSharedLy0(),weight)) + continue; + if 
(!_reg_vtx_selectors[region]->passCutEq("pos_sharedL0_eq",(int)pos_trk.getSharedLy0(),weight)) + continue; + if (!_reg_vtx_selectors[region]->passCutEq("ele_sharedL1_eq",(int)ele_trk.getSharedLy1(),weight)) + continue; + if (!_reg_vtx_selectors[region]->passCutEq("pos_sharedL1_eq",(int)pos_trk.getSharedLy1(),weight)) + continue; + + //Min vtx Y pos + if (!_reg_vtx_selectors[region]->passCutGt("VtxYPos_gt", vtx->getY(), weight)) + continue; + + //Max vtx Y pos + if (!_reg_vtx_selectors[region]->passCutLt("VtxYPos_lt", vtx->getY(), weight)) + continue; + + //Tracking Volume for positron + if (!_reg_vtx_selectors[region]->passCutGt("volPos_top", p_pos.Py(), weight)) + continue; + + if (!_reg_vtx_selectors[region]->passCutLt("volPos_bot", p_pos.Py(), weight)) + continue; + + if (!_reg_vtx_selectors[region]->passCutLt("deltaZ_lt", std::abs((ele_trk.getZ0()/ele_trk.getTanLambda()) - (pos_trk.getZ0()/pos_trk.getTanLambda())), weight)) + continue; + + //If this is MC check if MCParticle matched to the electron track is from rad or recoil + if(!isData_) + { + //Fill MC plots after all selections + _reg_mc_vtx_histos[region]->FillMCParticles(mcParts_, analysis_); + + //Count the number of hits per part on the ele track + std::map nHits4part; + for(int i =0; i < ele_trk.getMcpHits().size(); i++) + { + int partID = ele_trk.getMcpHits().at(i).second; + if ( nHits4part.find(partID) == nHits4part.end() ) + { + // not found + nHits4part[partID] = 1; + } + else + { + // found + nHits4part[partID]++; + } + } + + //Determine the MC part with the most hits on the track + int maxNHits = 0; + int maxID = 0; + for (std::map::iterator it=nHits4part.begin(); it!=nHits4part.end(); ++it) + { + if(it->second > maxNHits) + { + maxNHits = it->second; + maxID = it->first; + } + } + + //Find the correct mc part and grab mother id + int isRadEle = -999; + int isRecEle = -999; + + + trueEleP.SetXYZ(-999,-999,-999); + truePosP.SetXYZ(-999,-999,-999); + if (mcParts_) { + float trueEleE = -1; + float truePosE = -1; + for(int i = 0; i < mcParts_->size(); i++) + { + int momPDG = mcParts_->at(i)->getMomPDG(); + if(mcParts_->at(i)->getPDG() == 11 && momPDG == isRadPDG_) + { + std::vector lP = mcParts_->at(i)->getMomentum(); + trueEleP.SetXYZ(lP[0],lP[1],lP[2]); + trueEleE = mcParts_->at(i)->getEnergy(); + + } + if(mcParts_->at(i)->getPDG() == -11 && momPDG == isRadPDG_) + { + std::vector lP = mcParts_->at(i)->getMomentum(); + truePosP.SetXYZ(lP[0],lP[1],lP[2]); + truePosE = mcParts_->at(i)->getEnergy(); + + } + if(trueEleP.X() != -999 && truePosP.X() != -999){ + truePsum = trueEleP.Mag() + trueEleP.Mag(); + trueEsum = trueEleE + truePosE; + } + + if(mcParts_->at(i)->getID() != maxID) continue; + //Default isRadPDG = 622 + if(momPDG == isRadPDG_) isRadEle = 1; + if(momPDG == 623) isRecEle = 1; + } + } + double momRatio = recEleP.Mag() / trueEleP.Mag(); + double momAngle = trueEleP.Angle(recEleP) * TMath::RadToDeg(); + if (!_reg_vtx_selectors[region]->passCutLt("momRatio_lt", momRatio, weight)) continue; + if (!_reg_vtx_selectors[region]->passCutGt("momRatio_gt", momRatio, weight)) continue; + if (!_reg_vtx_selectors[region]->passCutLt("momAngle_lt", momAngle, weight)) continue; + + if (!_reg_vtx_selectors[region]->passCutEq("isRadEle_eq", isRadEle, weight)) continue; + if (!_reg_vtx_selectors[region]->passCutEq("isNotRadEle_eq", isRadEle, weight)) continue; + if (!_reg_vtx_selectors[region]->passCutEq("isRecEle_eq", isRecEle, weight)) continue; + } + + goodVtx = vtx; + nGoodVtx++; + goodVtxs.push_back(vtx); + } // selected 
vertices + + //N selected vertices - this is quite a silly cut to make at the end. But okay. that's how we decided atm. + if (!_reg_vtx_selectors[region]->passCutEq("nVtxs_eq", nGoodVtx, weight)) + continue; + //Move to after N vertices cut (was filled before) + _reg_vtx_histos[region]->Fill1DHisto("n_vertices_h", nGoodVtx, weight); + + //Loop over all selected vertices in the region + for(std::vector::iterator it = goodVtxs.begin(); it != goodVtxs.end(); it++){ + + Vertex* vtx = *it; + + Particle* ele = nullptr; + Particle* pos = nullptr; + + if (!vtx || !_ah->GetParticlesFromVtx(vtx,ele,pos)) + continue; + + CalCluster eleClus = ele->getCluster(); + CalCluster posClus = pos->getCluster(); + + double corr_eleClusterTime = ele->getCluster().getTime() - timeOffset_; + double corr_posClusterTime = pos->getCluster().getTime() - timeOffset_; + + double ele_E = ele->getEnergy(); + double pos_E = pos->getEnergy(); + + //Compute analysis variables here. + Track ele_trk = ele->getTrack(); + Track pos_trk = pos->getTrack(); + //Get the shared info - TODO change and improve + // + //Apply Track Bias Corrections + for (const auto& pair : trackBiasCorrections_){ + ele_trk.applyCorrection(pair.first, pair.second); + pos_trk.applyCorrection(pair.first, pair.second); + } + /* + //Track Time Corrections + ele_trk.applyCorrection("track_time",eleTrackTimeBias_); + pos_trk.applyCorrection("track_time", posTrackTimeBias_); + ele_trk.applyCorrection("z0", beamPosCorrections_.at(1)); + pos_trk.applyCorrection("z0", beamPosCorrections_.at(1)); + */ + + // Track Momentum bias + + if (biasingTool_) { + + // Correct for wrong track momentum - Bug Fix + // In case there was mis-configuration during reco/hpstr-ntuple step, correct + // the momentum magnitude here using the right bField for the data taking year + + if (bFieldScaleFactor_ > 0) { + biasingTool_->updateWithBiasP(ele_trk,bFieldScaleFactor_); + biasingTool_->updateWithBiasP(pos_trk,bFieldScaleFactor_); + } + + + biasingTool_->updateWithBiasP(ele_trk); + biasingTool_->updateWithBiasP(pos_trk); + } + + + double invm_smear = 1.; + //std::cout << "[Good Vtxs] ele track p before smearing: " << ele_trk.getP() << std::endl; + if (smearingTool_) { + double unsmeared_prod = ele_trk.getP()*pos_trk.getP(); + double ele_smf = smearingTool_->updateWithSmearP(ele_trk); + double pos_smf = smearingTool_->updateWithSmearP(pos_trk); + double smeared_prod = ele_trk.getP()*pos_trk.getP(); + invm_smear = sqrt(smeared_prod/unsmeared_prod); + smearingTool_->updateVertexWithSmearP(vtx, ele_smf, pos_smf); + } + //std::cout << "[Good Vtxs] ele track p after smearing: " << ele_trk.getP() << std::endl; + + //Get the layers hit on each track + std::vector ele_hit_layers = ele_trk.getHitLayers(); + int ele_Si0 = 0; + int ele_Si1 = 0; + int ele_lastlayer = 0; + for(int i=0; i pos_hit_layers = pos_trk.getHitLayers(); + int pos_Si0 = 0; + int pos_Si1 = 0; + int pos_lastlayer = 0; + for(int i=0; i vtx_cov = vtx->getCovariance(); + float cxx = vtx_cov.at(0); + float cyx = vtx_cov.at(1); + float cyy = vtx_cov.at(2); + float czx = vtx_cov.at(3); + float czy = vtx_cov.at(4); + float czz = vtx_cov.at(5); + + + //MC Truth hits in first 4 sensors + int L1L2hitCode = 0; //hit code '1111' means truth ax+ster hits in L1_ele, L1_pos, L2_ele, L2_pos + int L1hitCode = 0; //hit code '1111' means truth in L1_ele_ax, L1_ele_ster, L1_pos_ax, L1_pos_ster + int L2hitCode = 0; // hit code '1111' means truth in L2_ele_ax, L2_ele_ster, L2_pos_ax, L2_pos_ster + if(!isData_){ + //Get hit codes. 
Only sure this works for 2016 KF as is. + utils::get2016KFMCTruthHitCodes(&ele_trk, &pos_trk, L1L2hitCode, L1hitCode, L2hitCode); + //L1L2 truth hit selection + if (!_reg_vtx_selectors[region]->passCutLt("hitCode_lt",((double)L1L2hitCode)-0.5, weight)) continue; + if (!_reg_vtx_selectors[region]->passCutGt("hitCode_gt",((double)L1L2hitCode)+0.5, weight)) continue; + //Fil hitcodes + _reg_vtx_histos[region]->Fill1DHisto("hitCode_h", L1L2hitCode,weight); + _reg_vtx_histos[region]->Fill1DHisto("L1hitCode_h", L1hitCode,weight); + _reg_vtx_histos[region]->Fill1DHisto("L2hitCode_h", L2hitCode,weight); + } + + //track isolations + //Only calculate isolations if both track L1 and L2 hits exist + bool hasL1ele = false; + bool hasL2ele = false; + _ah->InnermostLayerCheck(&ele_trk, hasL1ele, hasL2ele); + + bool hasL1pos = false; + bool hasL2pos = false; + _ah->InnermostLayerCheck(&pos_trk, hasL1pos, hasL2pos); + + TVector3 ele_mom; + //ele_mom.SetX(ele->getMomentum()[0]); + //ele_mom.SetY(ele->getMomentum()[1]); + //ele_mom.SetZ(ele->getMomentum()[2]); + ele_mom.SetX(ele_trk.getMomentum()[0]); + ele_mom.SetY(ele_trk.getMomentum()[1]); + ele_mom.SetZ(ele_trk.getMomentum()[2]); + + + TVector3 pos_mom; + //pos_mom.SetX(pos->getMomentum()[0]); + //pos_mom.SetY(pos->getMomentum()[1]); + //pos_mom.SetZ(pos->getMomentum()[2]); + pos_mom.SetX(pos_trk.getMomentum()[0]); + pos_mom.SetY(pos_trk.getMomentum()[1]); + pos_mom.SetZ(pos_trk.getMomentum()[2]); + + double ele_pos_dt = corr_eleClusterTime - corr_posClusterTime; + double psum = ele_mom.Mag()+pos_mom.Mag(); + + //Ele nHits + int ele2dHits = ele_trk.getTrackerHitCount(); + if (!ele_trk.isKalmanTrack()) + ele2dHits*=2; + + //pos nHits + int pos2dHits = pos_trk.getTrackerHitCount(); + + if(ts_ != nullptr) + { + _reg_vtx_histos[region]->Fill2DHisto("trig_count_hh", + ((int)ts_->prescaled.Single_3_Top)+((int)ts_->prescaled.Single_3_Bot), + ((int)ts_->prescaled.Single_2_Top)+((int)ts_->prescaled.Single_2_Bot)); + } + _reg_vtx_histos[region]->Fill1DHisto("n_vtx_h", vtxs_->size()); + + //Add the momenta to the tracks + //ele_trk.setMomentum(ele->getMomentum()[0],ele->getMomentum()[1],ele->getMomentum()[2]); + //pos_trk.setMomentum(pos->getMomentum()[0],pos->getMomentum()[1],pos->getMomentum()[2]); + TVector3 recEleP(ele->getMomentum()[0],ele->getMomentum()[1],ele->getMomentum()[2]); + TLorentzVector p_ele; + p_ele.SetPxPyPzE(ele_trk.getMomentum()[0],ele_trk.getMomentum()[1],ele_trk.getMomentum()[2], ele_E); + TLorentzVector p_pos; + p_pos.SetPxPyPzE(pos_trk.getMomentum()[0],pos_trk.getMomentum()[1],pos_trk.getMomentum()[2], pos_E); + + + _reg_vtx_histos[region]->Fill2DHistograms(vtx,weight); + _reg_vtx_histos[region]->Fill1DVertex(vtx, + ele, + pos, + &ele_trk, + &pos_trk, + weight); + + _reg_vtx_histos[region]->Fill1DHisto("ele_pos_clusTimeDiff_h", (corr_eleClusterTime - corr_posClusterTime), weight); + _reg_vtx_histos[region]->Fill1DHisto("ele_track_n2dhits_h", ele2dHits, weight); + _reg_vtx_histos[region]->Fill1DHisto("pos_track_n2dhits_h", pos2dHits, weight); + _reg_vtx_histos[region]->Fill1DHisto("vtx_Psum_h", p_ele.P()+p_pos.P(), weight); + _reg_vtx_histos[region]->Fill1DHisto("vtx_Esum_h", eleClus.getEnergy()+posClus.getEnergy(), weight); + _reg_vtx_histos[region]->Fill1DHisto("vtx_smear_InvM_h", invm_smear*(vtx->getInvMass()), weight); + _reg_vtx_histos[region]->Fill2DHisto("ele_vtxZ_iso_hh", TMath::Min(ele_trk.getIsolation(0), ele_trk.getIsolation(1)), vtx->getZ(), weight); + _reg_vtx_histos[region]->Fill2DHisto("pos_vtxZ_iso_hh", 
TMath::Min(pos_trk.getIsolation(0), pos_trk.getIsolation(1)), vtx->getZ(), weight); + _reg_vtx_histos[region]->Fill2DTrack(&ele_trk,weight,"ele_"); + _reg_vtx_histos[region]->Fill2DTrack(&pos_trk,weight,"pos_"); + _reg_vtx_histos[region]->Fill1DHisto("mcMass622_h",apMass); + _reg_vtx_histos[region]->Fill1DHisto("mcZ622_h",apZ); + _reg_vtx_histos[region]->Fill1DHisto("mcMass625_h",vdMass); + _reg_vtx_histos[region]->Fill1DHisto("mcZ625_h",vdZ); + + + //Just for the selected vertex + if(!isData_) + { + _reg_vtx_histos[region]->Fill2DHisto("vtx_Esum_vs_true_Esum_hh",eleClus.getEnergy()+posClus.getEnergy(), trueEsum, weight); + _reg_vtx_histos[region]->Fill2DHisto("vtx_Psum_vs_true_Psum_hh",p_ele.P()+p_pos.P(), truePsum, weight); + _reg_vtx_histos[region]->Fill1DHisto("true_vtx_psum_h",truePsum,weight); + } + + double reconz = vtx->getZ(); + double ele_trk_z0 = ele_trk.getZ0(); + double ele_trk_z0err = ele_trk.getZ0Err(); + double pos_trk_z0 = pos_trk.getZ0(); + double pos_trk_z0err = pos_trk.getZ0Err(); + + //DeltaZ + double deltaZ = std::abs( (ele_trk_z0/ele_trk.getTanLambda()) - (pos_trk_z0/pos_trk.getTanLambda()) ); + + //Project vertex to target + double vtx_proj_x = -999.9; + double vtx_proj_y = -999.9; + double vtx_proj_x_sig = -999.9; + double vtx_proj_y_sig = -999.9; + double vtx_proj_sig = -999.9; + if(!v0ProjectionFitsCfg_.empty()) + vtx_proj_sig = utils::v0_projection_to_target_significance(v0proj_fits_, evth_->getRunNumber(), + vtx_proj_x, vtx_proj_y, vtx_proj_x_sig, vtx_proj_y_sig, vtx->getX(), vtx->getY(), + reconz, vtx->getP().X(), vtx->getP().Y(), vtx->getP().Z()); + + _reg_vtx_histos[region]->Fill2DHisto("unc_vtx_x_v_unc_vtx_y_hh", vtx->getX(), vtx->getY()); + _reg_vtx_histos[region]->Fill2DHisto("unc_vtx_proj_x_v_unc_vtx_proj_y_hh", vtx_proj_x, vtx_proj_y); + _reg_vtx_histos[region]->Fill2DHisto("unc_vtx_proj_x_y_significance_hh", vtx_proj_x_sig, vtx_proj_y_sig); + _reg_vtx_histos[region]->Fill2DHisto("recon_z_v_vtx_proj_significance_hh", vtx_proj_sig, reconz); + _reg_vtx_histos[region]->Fill2DHisto("vtx_track_reconz_v_Z0err_hh", ele_trk_z0err, reconz); + _reg_vtx_histos[region]->Fill2DHisto("vtx_track_reconz_v_Z0err_hh", pos_trk_z0err, reconz); + _reg_vtx_histos[region]->Fill2DHisto("recon_z_v_z0_hh", ele_trk_z0, reconz); + _reg_vtx_histos[region]->Fill2DHisto("recon_z_v_z0_hh", pos_trk_z0, reconz); + _reg_vtx_histos[region]->Fill2DHisto("vtx_track_recon_z_v_ABSdz0tanlambda_hh", std::abs((ele_trk_z0/ele_trk.getTanLambda()) - (pos_trk_z0/pos_trk.getTanLambda())), reconz); + _reg_vtx_histos[region]->Fill2DHisto("vtx_track_recon_z_v_dz0tanlambda_hh", ((ele_trk_z0/ele_trk.getTanLambda()) - (pos_trk_z0/pos_trk.getTanLambda())), reconz); + + _reg_vtx_histos[region]->Fill2DHisto("recon_z_v_cxx_hh", cxx, reconz); + _reg_vtx_histos[region]->Fill2DHisto("recon_z_v_cyy_hh", cyy, reconz); + _reg_vtx_histos[region]->Fill2DHisto("recon_z_v_czz_hh", czz, reconz); + _reg_vtx_histos[region]->Fill2DHisto("recon_z_v_cyx_hh", cyx, reconz); + _reg_vtx_histos[region]->Fill2DHisto("recon_z_v_czx_hh", czx, reconz); + _reg_vtx_histos[region]->Fill2DHisto("recon_z_v_czy_hh", czy, reconz); + _reg_vtx_histos[region]->Fill1DHisto("cxx_h", cxx); + _reg_vtx_histos[region]->Fill1DHisto("cyy_h", cyy); + _reg_vtx_histos[region]->Fill1DHisto("czz_h", czz); + _reg_vtx_histos[region]->Fill1DHisto("cyx_h", cyx); + _reg_vtx_histos[region]->Fill1DHisto("czx_h", czx); + _reg_vtx_histos[region]->Fill1DHisto("czy_h", czy); + _reg_vtx_histos[region]->Fill2DHisto("vtx_track_recon_z_v_z0tanlambda_hh", 
ele_trk_z0/ele_trk.getTanLambda(), reconz); + _reg_vtx_histos[region]->Fill2DHisto("vtx_track_recon_z_v_z0tanlambda_hh", pos_trk_z0/pos_trk.getTanLambda(), reconz); + _reg_vtx_histos[region]->Fill2DHisto("ele_clusT_v_ele_trackT_hh", ele_trk.getTrackTime(), corr_eleClusterTime, weight); + _reg_vtx_histos[region]->Fill2DHisto("pos_clusT_v_pos_trackT_hh", pos_trk.getTrackTime(), corr_posClusterTime, weight); + _reg_vtx_histos[region]->Fill2DHisto("ele_track_time_v_P_hh", ele_trk.getP(), ele_trk.getTrackTime(), weight); + _reg_vtx_histos[region]->Fill2DHisto("pos_track_time_v_P_hh", pos_trk.getP(), pos_trk.getTrackTime(), weight); + _reg_vtx_histos[region]->Fill2DHisto("ele_pos_clusTimeDiff_v_pSum_hh",ele_mom.Mag()+pos_mom.Mag(), ele_pos_dt, weight); + _reg_vtx_histos[region]->Fill2DHisto("ele_cluster_energy_v_track_p_hh",ele_trk.getP(), eleClus.getEnergy(), weight); + _reg_vtx_histos[region]->Fill2DHisto("pos_cluster_energy_v_track_p_hh",pos_trk.getP(), posClus.getEnergy(), weight); + _reg_vtx_histos[region]->Fill2DHisto("ele_track_cluster_dt_v_EoverP_hh",eleClus.getEnergy()/ele_trk.getP(), ele_trk.getTrackTime() - corr_eleClusterTime, weight); + _reg_vtx_histos[region]->Fill2DHisto("pos_track_cluster_dt_v_EoverP_hh",posClus.getEnergy()/pos_trk.getP(), pos_trk.getTrackTime() - corr_posClusterTime, weight); + _reg_vtx_histos[region]->Fill2DHisto("ele_track_clus_dt_v_p_hh",ele_trk.getP(), ele_trk.getTrackTime() - corr_eleClusterTime, weight); + _reg_vtx_histos[region]->Fill2DHisto("pos_track_clus_dt_v_p_hh",pos_trk.getP(), pos_trk.getTrackTime() - corr_posClusterTime, weight); + _reg_vtx_histos[region]->Fill2DHisto("ele_z0_vs_pos_z0_hh",ele_trk.getZ0(), pos_trk.getZ0(), weight); + + //chi2 2d plots + _reg_vtx_histos[region]->Fill2DHisto("ele_track_chi2ndf_v_time_hh", ele_trk.getTrackTime(), ele_trk.getChi2Ndf(), weight); + _reg_vtx_histos[region]->Fill2DHisto("ele_track_chi2ndf_v_p_hh", ele_trk.getP(), ele_trk.getChi2Ndf(), weight); + _reg_vtx_histos[region]->Fill2DHisto("ele_track_chi2ndf_v_tanlambda_hh", ele_trk.getTanLambda(), ele_trk.getChi2Ndf(), weight); + _reg_vtx_histos[region]->Fill2DHisto("ele_track_chi2ndf_v_n2dhits_hh", ele2dHits, ele_trk.getChi2Ndf(), weight); + + _reg_vtx_histos[region]->Fill2DHisto("pos_track_chi2ndf_v_time_hh", pos_trk.getTrackTime(), pos_trk.getChi2Ndf(), weight); + _reg_vtx_histos[region]->Fill2DHisto("pos_track_chi2ndf_v_p_hh", pos_trk.getP(), pos_trk.getChi2Ndf(), weight); + _reg_vtx_histos[region]->Fill2DHisto("pos_track_chi2ndf_v_tanlambda_hh", pos_trk.getTanLambda(), pos_trk.getChi2Ndf(), weight); + _reg_vtx_histos[region]->Fill2DHisto("pos_track_chi2ndf_v_n2dhits_hh", pos2dHits, pos_trk.getChi2Ndf(), weight); + + _reg_vtx_histos[region]->Fill2DHisto("ele_track_p_v_tanlambda_hh", ele_trk.getTanLambda(), ele_trk.getP(), weight); + _reg_vtx_histos[region]->Fill2DHisto("pos_track_p_v_tanlambda_hh", pos_trk.getTanLambda(), pos_trk.getP(), weight); + + + //1d histos + _reg_vtx_histos[region]->Fill1DHisto("ele_track_clus_dt_h", ele_trk.getTrackTime() - corr_eleClusterTime, weight); + _reg_vtx_histos[region]->Fill1DHisto("pos_track_clus_dt_h", pos_trk.getTrackTime() - corr_posClusterTime, weight); + + + //TODO put this in the Vertex! 
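
Editorial note: earlier in this hunk the smearing tool rescales each track momentum, returns the per-track scale factors, propagates them to the vertex via updateVertexWithSmearP, and the mass histogram is filled with invm_smear*(vtx->getInvMass()), where invm_smear = sqrt(smeared_prod/unsmeared_prod). That factor is exact for an ultra-relativistic e+e- pair whose directions are unchanged, since M^2 ~= 2*p_ele*p_pos*(1 - cos(theta)) scales with the momentum product, so M scales with the square root of that product. A small numeric check, with all values made up:

    #include <cmath>
    #include <iostream>

    // Small-angle, massless approximation of the pair invariant mass:
    // M^2 ~= 2 * p_ele * p_pos * (1 - cos(theta)).
    double pairMass(double pEle, double pPos, double theta) {
      return std::sqrt(2.0 * pEle * pPos * (1.0 - std::cos(theta)));
    }

    int main() {
      double pEle = 1.10, pPos = 0.85, theta = 0.05;   // GeV, GeV, rad (illustrative)
      double sEle = 1.02, sPos = 0.97;                 // per-track smear factors (illustrative)
      double m0 = pairMass(pEle, pPos, theta);
      double m1 = pairMass(sEle * pEle, sPos * pPos, theta);
      double invmSmear = std::sqrt((sEle * pEle * sPos * pPos) / (pEle * pPos));
      // The two ratios agree: rescaling the momenta rescales M by sqrt(sEle*sPos).
      std::cout << "smeared/unsmeared mass ratio = " << m1 / m0
                << ", invm_smear = " << invmSmear << "\n";
      return 0;
    }
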
+ TVector3 vtxPosSvt; + vtxPosSvt.SetX(vtx->getX()); + vtxPosSvt.SetY(vtx->getY()); + vtxPosSvt.SetZ(vtx->getZ()); + vtxPosSvt.RotateY(-0.0305); + + //Just for the selected vertex + if (makeFlatTuple_){ + if(!isData_){ + _reg_tuples[region]->setVariableValue("ap_true_vtx_z", apZ); + _reg_tuples[region]->setVariableValue("ap_true_vtx_mass", apMass); + _reg_tuples[region]->setVariableValue("ap_true_vtx_energy", apEnergy); + _reg_tuples[region]->setVariableValue("vd_true_vtx_z", vdZ); + _reg_tuples[region]->setVariableValue("vd_true_vtx_mass", vdMass); + _reg_tuples[region]->setVariableValue("vd_true_vtx_energy", vdEnergy); + _reg_tuples[region]->setVariableValue("hitCode", float(L1L2hitCode)); + _reg_tuples[region]->setVariableValue("L1hitCode", float(L1hitCode)); + _reg_tuples[region]->setVariableValue("L2hitCode", float(L2hitCode)); + } + + _reg_tuples[region]->setVariableValue("unc_vtx_mass", vtx->getInvMass()); + _reg_tuples[region]->setVariableValue("unc_vtx_z" , vtxPosSvt.Z()); + _reg_tuples[region]->setVariableValue("unc_vtx_chi2", vtx->getChi2()); + _reg_tuples[region]->setVariableValue("unc_vtx_psum", p_ele.P()+p_pos.P()); + _reg_tuples[region]->setVariableValue("unc_vtx_px", vtx->getP().X()); + _reg_tuples[region]->setVariableValue("unc_vtx_py", vtx->getP().Y()); + _reg_tuples[region]->setVariableValue("unc_vtx_pz", vtx->getP().Z()); + _reg_tuples[region]->setVariableValue("unc_vtx_x", vtx->getX()); + _reg_tuples[region]->setVariableValue("unc_vtx_y", vtx->getY()); + _reg_tuples[region]->setVariableValue("unc_vtx_proj_x", vtx_proj_x); + _reg_tuples[region]->setVariableValue("unc_vtx_proj_y", vtx_proj_y); + _reg_tuples[region]->setVariableValue("unc_vtx_proj_x_sig", vtx_proj_x_sig); + _reg_tuples[region]->setVariableValue("unc_vtx_proj_y_sig", vtx_proj_y_sig); + _reg_tuples[region]->setVariableValue("unc_vtx_proj_sig", vtx_proj_sig); + _reg_tuples[region]->setVariableValue("unc_vtx_ele_pos_clust_dt", corr_eleClusterTime - corr_posClusterTime); + + _reg_tuples[region]->setVariableValue("unc_vtx_cxx", cxx); + _reg_tuples[region]->setVariableValue("unc_vtx_cyy", cyy); + _reg_tuples[region]->setVariableValue("unc_vtx_czz", czz); + _reg_tuples[region]->setVariableValue("unc_vtx_cyx", cyx); + _reg_tuples[region]->setVariableValue("unc_vtx_czy", czy); + _reg_tuples[region]->setVariableValue("unc_vtx_czx", czx); + _reg_tuples[region]->setVariableValue("unc_vtx_deltaZ", deltaZ); + + //track vars + _reg_tuples[region]->setVariableValue("unc_vtx_ele_track_p", ele_trk.getP()); + _reg_tuples[region]->setVariableValue("unc_vtx_ele_track_t", ele_trk.getTrackTime()); + _reg_tuples[region]->setVariableValue("unc_vtx_ele_track_d0", ele_trk.getD0()); + _reg_tuples[region]->setVariableValue("unc_vtx_ele_track_phi0", ele_trk.getPhi()); + _reg_tuples[region]->setVariableValue("unc_vtx_ele_track_omega", ele_trk.getOmega()); + _reg_tuples[region]->setVariableValue("unc_vtx_ele_track_tanLambda", ele_trk.getTanLambda()); + _reg_tuples[region]->setVariableValue("unc_vtx_ele_track_z0", ele_trk.getZ0()); + _reg_tuples[region]->setVariableValue("unc_vtx_ele_track_chi2ndf", ele_trk.getChi2Ndf()); + _reg_tuples[region]->setVariableValue("unc_vtx_ele_track_clust_dt", ele_trk.getTrackTime() - corr_eleClusterTime); + _reg_tuples[region]->setVariableValue("unc_vtx_ele_track_z0Err",ele_trk.getZ0Err()); + _reg_tuples[region]->setVariableValue("unc_vtx_ele_track_d0Err", ele_trk.getD0Err()); + _reg_tuples[region]->setVariableValue("unc_vtx_ele_track_tanLambdaErr", ele_trk.getTanLambdaErr()); + 
_reg_tuples[region]->setVariableValue("unc_vtx_ele_track_PhiErr", ele_trk.getPhiErr()); + _reg_tuples[region]->setVariableValue("unc_vtx_ele_track_OmegaErr", ele_trk.getOmegaErr()); + _reg_tuples[region]->setVariableValue("unc_vtx_ele_track_nhits",ele2dHits); + _reg_tuples[region]->setVariableValue("unc_vtx_ele_track_lastlayer",ele_lastlayer); + _reg_tuples[region]->setVariableValue("unc_vtx_ele_track_si0",ele_Si0); + _reg_tuples[region]->setVariableValue("unc_vtx_ele_track_si1",ele_Si1); + + _reg_tuples[region]->setVariableValue("unc_vtx_pos_track_p", pos_trk.getP()); + _reg_tuples[region]->setVariableValue("unc_vtx_pos_track_t", pos_trk.getTrackTime()); + _reg_tuples[region]->setVariableValue("unc_vtx_pos_track_d0", pos_trk.getD0()); + _reg_tuples[region]->setVariableValue("unc_vtx_pos_track_phi0", pos_trk.getPhi()); + _reg_tuples[region]->setVariableValue("unc_vtx_pos_track_omega", pos_trk.getOmega()); + _reg_tuples[region]->setVariableValue("unc_vtx_pos_track_tanLambda", pos_trk.getTanLambda()); + _reg_tuples[region]->setVariableValue("unc_vtx_pos_track_z0", pos_trk.getZ0()); + _reg_tuples[region]->setVariableValue("unc_vtx_pos_track_chi2ndf", pos_trk.getChi2Ndf()); + _reg_tuples[region]->setVariableValue("unc_vtx_pos_track_clust_dt", pos_trk.getTrackTime() - corr_posClusterTime); + _reg_tuples[region]->setVariableValue("unc_vtx_pos_track_z0Err",pos_trk.getZ0Err()); + _reg_tuples[region]->setVariableValue("unc_vtx_pos_track_d0Err", pos_trk.getD0Err()); + _reg_tuples[region]->setVariableValue("unc_vtx_pos_track_tanLambdaErr", pos_trk.getTanLambdaErr()); + _reg_tuples[region]->setVariableValue("unc_vtx_pos_track_PhiErr", pos_trk.getPhiErr()); + _reg_tuples[region]->setVariableValue("unc_vtx_pos_track_OmegaErr", pos_trk.getOmegaErr()); + _reg_tuples[region]->setVariableValue("unc_vtx_pos_track_nhits",pos2dHits); + _reg_tuples[region]->setVariableValue("unc_vtx_pos_track_lastlayer",pos_lastlayer); + _reg_tuples[region]->setVariableValue("unc_vtx_pos_track_si0",pos_Si0); + _reg_tuples[region]->setVariableValue("unc_vtx_pos_track_si1",pos_Si1); + + //clust vars + _reg_tuples[region]->setVariableValue("unc_vtx_ele_clust_E", eleClus.getEnergy()); + _reg_tuples[region]->setVariableValue("unc_vtx_ele_clust_x", eleClus.getPosition().at(0)); + _reg_tuples[region]->setVariableValue("unc_vtx_ele_clust_corr_t",corr_eleClusterTime); + + _reg_tuples[region]->setVariableValue("unc_vtx_pos_clust_E", posClus.getEnergy()); + _reg_tuples[region]->setVariableValue("unc_vtx_pos_clust_x", posClus.getPosition().at(0)); + _reg_tuples[region]->setVariableValue("unc_vtx_pos_clust_corr_t",corr_posClusterTime); + _reg_tuples[region]->setVariableValue("run_number", evth_->getRunNumber()); + + _reg_tuples[region]->setVariableValue("unc_vtx_ele_track_ecal_x", ele_trk.getPositionAtEcal().at(0)); + _reg_tuples[region]->setVariableValue("unc_vtx_ele_track_ecal_y", ele_trk.getPositionAtEcal().at(1)); + _reg_tuples[region]->setVariableValue("unc_vtx_ele_track_z", ele_trk.getPosition().at(2)); + _reg_tuples[region]->setVariableValue("unc_vtx_pos_track_ecal_x", pos_trk.getPositionAtEcal().at(0)); + _reg_tuples[region]->setVariableValue("unc_vtx_pos_track_ecal_y", pos_trk.getPositionAtEcal().at(1)); + _reg_tuples[region]->setVariableValue("unc_vtx_pos_track_z", pos_trk.getPosition().at(2)); + _reg_tuples[region]->setVariableValue("unc_vtx_ele_track_px", ele_trk.getMomentum().at(0)); + _reg_tuples[region]->setVariableValue("unc_vtx_ele_track_py", ele_trk.getMomentum().at(1)); + 
_reg_tuples[region]->setVariableValue("unc_vtx_ele_track_pz", ele_trk.getMomentum().at(2)); + _reg_tuples[region]->setVariableValue("unc_vtx_pos_track_px", pos_trk.getMomentum().at(0)); + _reg_tuples[region]->setVariableValue("unc_vtx_pos_track_py", pos_trk.getMomentum().at(1)); + _reg_tuples[region]->setVariableValue("unc_vtx_pos_track_pz", pos_trk.getMomentum().at(2)); + + _reg_tuples[region]->fill(); + } + } + + }// regions + + return true; } void NewVertexAnaProcessor::finalize() { - //TODO clean this up a little. - outF_->cd(); - _vtx_histos->saveHistos(outF_,_vtx_histos->getName()); - outF_->cd(_vtx_histos->getName().c_str()); - vtxSelector->getCutFlowHisto()->Write(); - - outF_->cd(); - if(!isData_) - _mc_vtx_histos->saveHistos(outF_, _mc_vtx_histos->getName()); - //delete histos; - //histos = nullptr; - - - for (reg_it it = _reg_vtx_histos.begin(); it!=_reg_vtx_histos.end(); ++it) { - std::string dirName = anaName_+"_"+it->first; - (it->second)->saveHistos(outF_,dirName); - outF_->cd(dirName.c_str()); - _reg_vtx_selectors[it->first]->getCutFlowHisto()->Write(); - //Save tuples - if (makeFlatTuple_) - _reg_tuples[it->first]->writeTree(); - - } - - if(!isData_){ - for (reg_mc_it it = _reg_mc_vtx_histos.begin(); it!=_reg_mc_vtx_histos.end(); ++it) { - std::string dirName = anaName_+"_mc_"+it->first; - (it->second)->saveHistos(outF_,dirName); - outF_->cd(dirName.c_str()); - } - } - - outF_->Close(); + //TODO clean this up a little. + outF_->cd(); + _vtx_histos->saveHistos(outF_,_vtx_histos->getName()); + outF_->cd(_vtx_histos->getName().c_str()); + vtxSelector->getCutFlowHisto()->Write(); + + outF_->cd(); + if(!isData_) + _mc_vtx_histos->saveHistos(outF_, _mc_vtx_histos->getName()); + //delete histos; + //histos = nullptr; + + + for (reg_it it = _reg_vtx_histos.begin(); it!=_reg_vtx_histos.end(); ++it) { + std::string dirName = anaName_+"_"+it->first; + (it->second)->saveHistos(outF_,dirName); + outF_->cd(dirName.c_str()); + _reg_vtx_selectors[it->first]->getCutFlowHisto()->Write(); + //Save tuples + if (makeFlatTuple_) + _reg_tuples[it->first]->writeTree(); + + } + + if(!isData_){ + for (reg_mc_it it = _reg_mc_vtx_histos.begin(); it!=_reg_mc_vtx_histos.end(); ++it) { + std::string dirName = anaName_+"_mc_"+it->first; + (it->second)->saveHistos(outF_,dirName); + outF_->cd(dirName.c_str()); + } + } + + outF_->Close(); } diff --git a/utils/include/TrackSmearingTool.h b/utils/include/TrackSmearingTool.h index 819be035e..95a416770 100644 --- a/utils/include/TrackSmearingTool.h +++ b/utils/include/TrackSmearingTool.h @@ -12,6 +12,7 @@ //------------------// #include "Track.h" +#include "Vertex.h" class TFile; class TH1D; @@ -27,7 +28,8 @@ class TrackSmearingTool { const std::string& tracks = "KalmanFullTracks"); double smearTrackP(const Track& trk); - void updateWithSmearP(Track& trk); + double updateWithSmearP(Track& trk); + void updateVertexWithSmearP(Vertex* vtx, double ele_smear_factor, double pos_smear_factor); private: diff --git a/utils/src/TrackSmearingTool.cxx b/utils/src/TrackSmearingTool.cxx index 769e1dedb..24949249e 100644 --- a/utils/src/TrackSmearingTool.cxx +++ b/utils/src/TrackSmearingTool.cxx @@ -75,7 +75,7 @@ double TrackSmearingTool::smearTrackP(const Track& track) { } -void TrackSmearingTool::updateWithSmearP(Track& trk) { +double TrackSmearingTool::updateWithSmearP(Track& trk) { double smeared_magnitude = smearTrackP(trk); // updated momentum by scaling each coordinate by smeared/unsmeared // this takes the direction of the unsmeared momentum and applies @@ -85,5 
+85,23 @@ void TrackSmearingTool::updateWithSmearP(Track& trk) { for (double& coordinate : momentum) coordinate *= (smeared_magnitude/unsmeared_magnitude); trk.setMomentum(momentum); + return (smeared_magnitude/unsmeared_magnitude); + } +void TrackSmearingTool::updateVertexWithSmearP(Vertex* vtx, double ele_smear_factor, double pos_smear_factor) { + TVector3 p1_corr, p2_corr; + double m_corr; + + p1_corr.SetX(vtx->getP1X()*ele_smear_factor); + p1_corr.SetY(vtx->getP1Y()*ele_smear_factor); + p1_corr.SetZ(vtx->getP1Z()*ele_smear_factor); + + p2_corr.SetX(vtx->getP2X()*pos_smear_factor); + p2_corr.SetY(vtx->getP2Y()*pos_smear_factor); + p2_corr.SetZ(vtx->getP2Z()*pos_smear_factor); + + m_corr = vtx->getInvMass() * sqrt(ele_smear_factor*pos_smear_factor); + + vtx->setVtxParameters(p1_corr, p2_corr, m_corr); +} From 5f68ef8ba7f1be00c497da1a68db14fb81a0c7e8 Mon Sep 17 00:00:00 2001 From: Alic Shen Spellman Date: Wed, 5 Jun 2024 14:03:11 -0700 Subject: [PATCH 04/27] made some changes to this processor that should be kept --- processors/src/NewVertexAnaProcessor.cxx | 3046 +++++++++++----------- 1 file changed, 1530 insertions(+), 1516 deletions(-) diff --git a/processors/src/NewVertexAnaProcessor.cxx b/processors/src/NewVertexAnaProcessor.cxx index d33861638..b79f68cc9 100644 --- a/processors/src/NewVertexAnaProcessor.cxx +++ b/processors/src/NewVertexAnaProcessor.cxx @@ -18,1546 +18,1560 @@ NewVertexAnaProcessor::NewVertexAnaProcessor(const std::string& name, Process& p NewVertexAnaProcessor::~NewVertexAnaProcessor(){} void NewVertexAnaProcessor::configure(const ParameterSet& parameters) { - std::cout << "Configuring NewVertexAnaProcessor" <(); - - vtxSelector = std::make_shared(anaName_+"_"+"vtxSelection",selectionCfg_); - vtxSelector->setDebug(debug_); - vtxSelector->LoadSelection(); - - _vtx_histos = std::make_shared(anaName_+"_"+"vtxSelection"); - _vtx_histos->loadHistoConfig(histoCfg_); - _vtx_histos->DefineHistos(); - - if(!isData_){ - _mc_vtx_histos = std::make_shared(anaName_+"_mc_"+"vtxSelection"); - _mc_vtx_histos->loadHistoConfig(mcHistoCfg_); - _mc_vtx_histos->DefineHistos(); - _mc_vtx_histos->Define2DHistos(); - } - - //Ana corrections to misc parameters - /* - if(!anaCorrectionsCfg_.empty()){ - std::ifstream anac_file(anaCorrectionsCfg_); - anac_file >> anac_configs_; - anac_file.close(); - }*/ - - //Load Run Dependent V0 target projection fits from json - if(!v0ProjectionFitsCfg_.empty()){ - std::ifstream v0proj_file(v0ProjectionFitsCfg_); - v0proj_file >> v0proj_fits_; - v0proj_file.close(); - } - - //Run Dependent Corrections - //Beam Position - if(!beamPosCfg_.empty()){ - std::ifstream bpc_file(beamPosCfg_); - bpc_file >> bpc_configs_; - bpc_file.close(); - } - - //Track parameter bias corrections - if(!trackBiasCfg_.empty()){ - std::ifstream tbc_file(trackBiasCfg_); - tbc_file >> tbc_configs_; - tbc_file.close(); - } - - - - // histos = new MCAnaHistos(anaName_); - //histos->loadHistoConfig(histCfgFilename_) - //histos->DefineHistos(); - //histos->Define2DHistos(); - - - //For each region initialize plots - - for (unsigned int i_reg = 0; i_reg < regionSelections_.size(); i_reg++) { - std::string regname = AnaHelpers::getFileName(regionSelections_[i_reg],false); - std::cout<<"Setting up region:: " << regname <(anaName_+"_"+regname, regionSelections_[i_reg]); - _reg_vtx_selectors[regname]->setDebug(debug_); - _reg_vtx_selectors[regname]->LoadSelection(); - - _reg_vtx_histos[regname] = std::make_shared(anaName_+"_"+regname); - _reg_vtx_histos[regname]->loadHistoConfig(histoCfg_); 
- _reg_vtx_histos[regname]->DefineHistos(); - - - if(!isData_){ - _reg_mc_vtx_histos[regname] = std::make_shared(anaName_+"_mc_"+regname); - _reg_mc_vtx_histos[regname]->loadHistoConfig(mcHistoCfg_); - _reg_mc_vtx_histos[regname]->DefineHistos(); - } - - //Build a flat tuple for vertex and track params - if (makeFlatTuple_){ - _reg_tuples[regname] = std::make_shared(anaName_+"_"+regname+"_tree"); - - //vtx vars - _reg_tuples[regname]->addVariable("unc_vtx_mass"); - _reg_tuples[regname]->addVariable("unc_vtx_z"); - _reg_tuples[regname]->addVariable("unc_vtx_chi2"); - _reg_tuples[regname]->addVariable("unc_vtx_psum"); - _reg_tuples[regname]->addVariable("unc_vtx_px"); - _reg_tuples[regname]->addVariable("unc_vtx_py"); - _reg_tuples[regname]->addVariable("unc_vtx_pz"); - _reg_tuples[regname]->addVariable("unc_vtx_x"); - _reg_tuples[regname]->addVariable("unc_vtx_y"); - _reg_tuples[regname]->addVariable("unc_vtx_ele_pos_clus_dt"); - _reg_tuples[regname]->addVariable("run_number"); - _reg_tuples[regname]->addVariable("unc_vtx_cxx"); - _reg_tuples[regname]->addVariable("unc_vtx_cyy"); - _reg_tuples[regname]->addVariable("unc_vtx_czz"); - _reg_tuples[regname]->addVariable("unc_vtx_cyx"); - _reg_tuples[regname]->addVariable("unc_vtx_czy"); - _reg_tuples[regname]->addVariable("unc_vtx_czx"); - _reg_tuples[regname]->addVariable("unc_vtx_proj_x"); - _reg_tuples[regname]->addVariable("unc_vtx_proj_y"); - _reg_tuples[regname]->addVariable("unc_vtx_proj_x_sig"); - _reg_tuples[regname]->addVariable("unc_vtx_proj_y_sig"); - _reg_tuples[regname]->addVariable("unc_vtx_proj_sig"); - _reg_tuples[regname]->addVariable("unc_vtx_deltaZ"); - - - //track vars - _reg_tuples[regname]->addVariable("unc_vtx_ele_track_p"); - _reg_tuples[regname]->addVariable("unc_vtx_ele_track_t"); - _reg_tuples[regname]->addVariable("unc_vtx_ele_track_d0"); - _reg_tuples[regname]->addVariable("unc_vtx_ele_track_phi0"); - _reg_tuples[regname]->addVariable("unc_vtx_ele_track_omega"); - _reg_tuples[regname]->addVariable("unc_vtx_ele_track_tanLambda"); - _reg_tuples[regname]->addVariable("unc_vtx_ele_track_z0"); - _reg_tuples[regname]->addVariable("unc_vtx_ele_track_chi2ndf"); - _reg_tuples[regname]->addVariable("unc_vtx_ele_track_clust_dt"); - _reg_tuples[regname]->addVariable("unc_vtx_ele_track_z0Err"); - _reg_tuples[regname]->addVariable("unc_vtx_ele_track_d0Err"); - _reg_tuples[regname]->addVariable("unc_vtx_ele_track_tanLambdaErr"); - _reg_tuples[regname]->addVariable("unc_vtx_ele_track_PhiErr"); - _reg_tuples[regname]->addVariable("unc_vtx_ele_track_OmegaErr"); - _reg_tuples[regname]->addVariable("unc_vtx_ele_track_L1_isolation"); - _reg_tuples[regname]->addVariable("unc_vtx_ele_track_nhits"); - _reg_tuples[regname]->addVariable("unc_vtx_ele_track_lastlayer"); - _reg_tuples[regname]->addVariable("unc_vtx_ele_track_si0"); - _reg_tuples[regname]->addVariable("unc_vtx_ele_track_si1"); - _reg_tuples[regname]->addVariable("unc_vtx_ele_track_ecal_x"); - _reg_tuples[regname]->addVariable("unc_vtx_ele_track_ecal_y"); - _reg_tuples[regname]->addVariable("unc_vtx_ele_track_z"); - _reg_tuples[regname]->addVariable("unc_vtx_ele_track_px"); - _reg_tuples[regname]->addVariable("unc_vtx_ele_track_py"); - _reg_tuples[regname]->addVariable("unc_vtx_ele_track_pz"); - - _reg_tuples[regname]->addVariable("unc_vtx_pos_track_clust_dt"); - _reg_tuples[regname]->addVariable("unc_vtx_pos_track_p"); - _reg_tuples[regname]->addVariable("unc_vtx_pos_track_t"); - _reg_tuples[regname]->addVariable("unc_vtx_pos_track_d0"); - 
_reg_tuples[regname]->addVariable("unc_vtx_pos_track_phi0"); - _reg_tuples[regname]->addVariable("unc_vtx_pos_track_omega"); - _reg_tuples[regname]->addVariable("unc_vtx_pos_track_tanLambda"); - _reg_tuples[regname]->addVariable("unc_vtx_pos_track_z0"); - _reg_tuples[regname]->addVariable("unc_vtx_pos_track_chi2ndf"); - _reg_tuples[regname]->addVariable("unc_vtx_pos_track_z0Err"); - _reg_tuples[regname]->addVariable("unc_vtx_pos_track_d0Err"); - _reg_tuples[regname]->addVariable("unc_vtx_pos_track_tanLambdaErr"); - _reg_tuples[regname]->addVariable("unc_vtx_pos_track_PhiErr"); - _reg_tuples[regname]->addVariable("unc_vtx_pos_track_OmegaErr"); - _reg_tuples[regname]->addVariable("unc_vtx_pos_track_L1_isolation"); - _reg_tuples[regname]->addVariable("unc_vtx_pos_track_nhits"); - _reg_tuples[regname]->addVariable("unc_vtx_pos_track_lastlayer"); - _reg_tuples[regname]->addVariable("unc_vtx_pos_track_si0"); - _reg_tuples[regname]->addVariable("unc_vtx_pos_track_si1"); - _reg_tuples[regname]->addVariable("unc_vtx_pos_track_ecal_x"); - _reg_tuples[regname]->addVariable("unc_vtx_pos_track_ecal_y"); - _reg_tuples[regname]->addVariable("unc_vtx_pos_track_z"); - _reg_tuples[regname]->addVariable("unc_vtx_pos_track_px"); - _reg_tuples[regname]->addVariable("unc_vtx_pos_track_py"); - _reg_tuples[regname]->addVariable("unc_vtx_pos_track_pz"); - - //clust vars - _reg_tuples[regname]->addVariable("unc_vtx_ele_clust_E"); - _reg_tuples[regname]->addVariable("unc_vtx_ele_clust_x"); - _reg_tuples[regname]->addVariable("unc_vtx_ele_clust_corr_t"); - - _reg_tuples[regname]->addVariable("unc_vtx_pos_clust_E"); - _reg_tuples[regname]->addVariable("unc_vtx_pos_clust_x"); - _reg_tuples[regname]->addVariable("unc_vtx_pos_clust_corr_t"); - - if(!isData_) - { - _reg_tuples[regname]->addVariable("true_vtx_z"); - _reg_tuples[regname]->addVariable("true_vtx_mass"); - _reg_tuples[regname]->addVariable("ap_true_vtx_z"); - _reg_tuples[regname]->addVariable("ap_true_vtx_mass"); - _reg_tuples[regname]->addVariable("ap_true_vtx_energy"); - _reg_tuples[regname]->addVariable("vd_true_vtx_z"); - _reg_tuples[regname]->addVariable("vd_true_vtx_mass"); - _reg_tuples[regname]->addVariable("vd_true_vtx_energy"); - _reg_tuples[regname]->addVariable("hitCode"); - _reg_tuples[regname]->addVariable("L1hitCode"); - _reg_tuples[regname]->addVariable("L2hitCode"); - } - } - - _regions.push_back(regname); - } - - // Get list of branches in tree to help protect accessing them - int nBr = tree_->GetListOfBranches()->GetEntries(); - if (debug_) std::cout << "Tree has " << nBr << " branches" << std::endl; - for(int iBr = 0; iBr < nBr; iBr++) - { - TBranch *br = dynamic_cast(tree_->GetListOfBranches()->At(iBr)); - brMap_.insert(std::map::value_type(br->GetName(), 1)); - if (debug_) std::cout << br->GetName() << ": " << brMap_[br->GetName()] << std::endl; - } - - //init Reading Tree - tree_->SetBranchAddress("EventHeader", &evth_ , &bevth_); - if (brMap_.find(tsColl_.c_str()) != brMap_.end()) tree_->SetBranchAddress(tsColl_.c_str(), &ts_ , &bts_); - tree_->SetBranchAddress(vtxColl_.c_str(), &vtxs_ , &bvtxs_); - //tree_->SetBranchAddress(hitColl_.c_str(), &hits_ , &bhits_); - if (brMap_.find(hitColl_.c_str()) != brMap_.end()) tree_->SetBranchAddress(hitColl_.c_str(), &hits_ , &bhits_); - if(!isData_ && !mcColl_.empty()) tree_->SetBranchAddress(mcColl_.c_str() , &mcParts_, &bmcParts_); - - if (not pSmearingFile_.empty()) { - smearingTool_ = std::make_shared(pSmearingFile_,true, pSmearingSeed_); - } - - if (not pBiasingFile_.empty()) { - biasingTool_ 
= std::make_shared(pBiasingFile_); - } + tree_ = tree; + _ah = std::make_shared(); + + vtxSelector = std::make_shared(anaName_+"_"+"vtxSelection",selectionCfg_); + vtxSelector->setDebug(debug_); + vtxSelector->LoadSelection(); + + _vtx_histos = std::make_shared(anaName_+"_"+"vtxSelection"); + _vtx_histos->loadHistoConfig(histoCfg_); + _vtx_histos->DefineHistos(); + + if(!isData_){ + _mc_vtx_histos = std::make_shared(anaName_+"_mc_"+"vtxSelection"); + _mc_vtx_histos->loadHistoConfig(mcHistoCfg_); + _mc_vtx_histos->DefineHistos(); + _mc_vtx_histos->Define2DHistos(); + } + + //Ana corrections to misc parameters + /* + if(!anaCorrectionsCfg_.empty()){ + std::ifstream anac_file(anaCorrectionsCfg_); + anac_file >> anac_configs_; + anac_file.close(); + }*/ + + //Load Run Dependent V0 target projection fits from json + if(!v0ProjectionFitsCfg_.empty()){ + std::ifstream v0proj_file(v0ProjectionFitsCfg_); + v0proj_file >> v0proj_fits_; + v0proj_file.close(); + } + + //Run Dependent Corrections + //Beam Position + if(!beamPosCfg_.empty()){ + std::ifstream bpc_file(beamPosCfg_); + bpc_file >> bpc_configs_; + bpc_file.close(); + } + + //Track parameter bias corrections + if(!trackBiasCfg_.empty()){ + std::ifstream tbc_file(trackBiasCfg_); + tbc_file >> tbc_configs_; + tbc_file.close(); + } + + + + // histos = new MCAnaHistos(anaName_); + //histos->loadHistoConfig(histCfgFilename_) + //histos->DefineHistos(); + //histos->Define2DHistos(); + + + //For each region initialize plots + + for (unsigned int i_reg = 0; i_reg < regionSelections_.size(); i_reg++) { + std::string regname = AnaHelpers::getFileName(regionSelections_[i_reg],false); + std::cout<<"Setting up region:: " << regname <(anaName_+"_"+regname, regionSelections_[i_reg]); + _reg_vtx_selectors[regname]->setDebug(debug_); + _reg_vtx_selectors[regname]->LoadSelection(); + + _reg_vtx_histos[regname] = std::make_shared(anaName_+"_"+regname); + _reg_vtx_histos[regname]->loadHistoConfig(histoCfg_); + _reg_vtx_histos[regname]->DefineHistos(); + + + if(!isData_){ + _reg_mc_vtx_histos[regname] = std::make_shared(anaName_+"_mc_"+regname); + _reg_mc_vtx_histos[regname]->loadHistoConfig(mcHistoCfg_); + _reg_mc_vtx_histos[regname]->DefineHistos(); + } + + //Build a flat tuple for vertex and track params + if (makeFlatTuple_){ + _reg_tuples[regname] = std::make_shared(anaName_+"_"+regname+"_tree"); + + //vtx vars + _reg_tuples[regname]->addVariable("unc_vtx_mass"); + _reg_tuples[regname]->addVariable("unc_vtx_z"); + _reg_tuples[regname]->addVariable("unc_vtx_chi2"); + _reg_tuples[regname]->addVariable("unc_vtx_psum"); + _reg_tuples[regname]->addVariable("unc_vtx_px"); + _reg_tuples[regname]->addVariable("unc_vtx_py"); + _reg_tuples[regname]->addVariable("unc_vtx_pz"); + _reg_tuples[regname]->addVariable("unc_vtx_x"); + _reg_tuples[regname]->addVariable("unc_vtx_y"); + _reg_tuples[regname]->addVariable("unc_vtx_ele_pos_clus_dt"); + _reg_tuples[regname]->addVariable("run_number"); + _reg_tuples[regname]->addVariable("unc_vtx_cxx"); + _reg_tuples[regname]->addVariable("unc_vtx_cyy"); + _reg_tuples[regname]->addVariable("unc_vtx_czz"); + _reg_tuples[regname]->addVariable("unc_vtx_cyx"); + _reg_tuples[regname]->addVariable("unc_vtx_czy"); + _reg_tuples[regname]->addVariable("unc_vtx_czx"); + _reg_tuples[regname]->addVariable("unc_vtx_proj_x"); + _reg_tuples[regname]->addVariable("unc_vtx_proj_y"); + _reg_tuples[regname]->addVariable("unc_vtx_proj_x_sig"); + _reg_tuples[regname]->addVariable("unc_vtx_proj_y_sig"); + 
_reg_tuples[regname]->addVariable("unc_vtx_proj_sig"); + _reg_tuples[regname]->addVariable("unc_vtx_deltaZ"); + + + //track vars + _reg_tuples[regname]->addVariable("unc_vtx_ele_track_p"); + _reg_tuples[regname]->addVariable("unc_vtx_ele_track_t"); + _reg_tuples[regname]->addVariable("unc_vtx_ele_track_d0"); + _reg_tuples[regname]->addVariable("unc_vtx_ele_track_phi0"); + _reg_tuples[regname]->addVariable("unc_vtx_ele_track_omega"); + _reg_tuples[regname]->addVariable("unc_vtx_ele_track_tanLambda"); + _reg_tuples[regname]->addVariable("unc_vtx_ele_track_z0"); + _reg_tuples[regname]->addVariable("unc_vtx_ele_track_chi2ndf"); + _reg_tuples[regname]->addVariable("unc_vtx_ele_track_clust_dt"); + _reg_tuples[regname]->addVariable("unc_vtx_ele_track_z0Err"); + _reg_tuples[regname]->addVariable("unc_vtx_ele_track_d0Err"); + _reg_tuples[regname]->addVariable("unc_vtx_ele_track_tanLambdaErr"); + _reg_tuples[regname]->addVariable("unc_vtx_ele_track_PhiErr"); + _reg_tuples[regname]->addVariable("unc_vtx_ele_track_OmegaErr"); + _reg_tuples[regname]->addVariable("unc_vtx_ele_track_L1_isolation"); + _reg_tuples[regname]->addVariable("unc_vtx_ele_track_nhits"); + _reg_tuples[regname]->addVariable("unc_vtx_ele_track_lastlayer"); + _reg_tuples[regname]->addVariable("unc_vtx_ele_track_si0"); + _reg_tuples[regname]->addVariable("unc_vtx_ele_track_si1"); + _reg_tuples[regname]->addVariable("unc_vtx_ele_track_ecal_x"); + _reg_tuples[regname]->addVariable("unc_vtx_ele_track_ecal_y"); + _reg_tuples[regname]->addVariable("unc_vtx_ele_track_z"); + _reg_tuples[regname]->addVariable("unc_vtx_ele_track_px"); + _reg_tuples[regname]->addVariable("unc_vtx_ele_track_py"); + _reg_tuples[regname]->addVariable("unc_vtx_ele_track_pz"); + + _reg_tuples[regname]->addVariable("unc_vtx_pos_track_clust_dt"); + _reg_tuples[regname]->addVariable("unc_vtx_pos_track_p"); + _reg_tuples[regname]->addVariable("unc_vtx_pos_track_t"); + _reg_tuples[regname]->addVariable("unc_vtx_pos_track_d0"); + _reg_tuples[regname]->addVariable("unc_vtx_pos_track_phi0"); + _reg_tuples[regname]->addVariable("unc_vtx_pos_track_omega"); + _reg_tuples[regname]->addVariable("unc_vtx_pos_track_tanLambda"); + _reg_tuples[regname]->addVariable("unc_vtx_pos_track_z0"); + _reg_tuples[regname]->addVariable("unc_vtx_pos_track_chi2ndf"); + _reg_tuples[regname]->addVariable("unc_vtx_pos_track_z0Err"); + _reg_tuples[regname]->addVariable("unc_vtx_pos_track_d0Err"); + _reg_tuples[regname]->addVariable("unc_vtx_pos_track_tanLambdaErr"); + _reg_tuples[regname]->addVariable("unc_vtx_pos_track_PhiErr"); + _reg_tuples[regname]->addVariable("unc_vtx_pos_track_OmegaErr"); + _reg_tuples[regname]->addVariable("unc_vtx_pos_track_L1_isolation"); + _reg_tuples[regname]->addVariable("unc_vtx_pos_track_nhits"); + _reg_tuples[regname]->addVariable("unc_vtx_pos_track_lastlayer"); + _reg_tuples[regname]->addVariable("unc_vtx_pos_track_si0"); + _reg_tuples[regname]->addVariable("unc_vtx_pos_track_si1"); + _reg_tuples[regname]->addVariable("unc_vtx_pos_track_ecal_x"); + _reg_tuples[regname]->addVariable("unc_vtx_pos_track_ecal_y"); + _reg_tuples[regname]->addVariable("unc_vtx_pos_track_z"); + _reg_tuples[regname]->addVariable("unc_vtx_pos_track_px"); + _reg_tuples[regname]->addVariable("unc_vtx_pos_track_py"); + _reg_tuples[regname]->addVariable("unc_vtx_pos_track_pz"); + + //clust vars + _reg_tuples[regname]->addVariable("unc_vtx_ele_clust_E"); + _reg_tuples[regname]->addVariable("unc_vtx_ele_clust_x"); + _reg_tuples[regname]->addVariable("unc_vtx_ele_clust_corr_t"); + + 
_reg_tuples[regname]->addVariable("unc_vtx_pos_clust_E"); + _reg_tuples[regname]->addVariable("unc_vtx_pos_clust_x"); + _reg_tuples[regname]->addVariable("unc_vtx_pos_clust_corr_t"); + + if(!isData_) + { + _reg_tuples[regname]->addVariable("true_vtx_z"); + _reg_tuples[regname]->addVariable("true_vtx_mass"); + _reg_tuples[regname]->addVariable("ap_true_vtx_z"); + _reg_tuples[regname]->addVariable("ap_true_vtx_mass"); + _reg_tuples[regname]->addVariable("ap_true_vtx_energy"); + _reg_tuples[regname]->addVariable("vd_true_vtx_z"); + _reg_tuples[regname]->addVariable("vd_true_vtx_mass"); + _reg_tuples[regname]->addVariable("vd_true_vtx_energy"); + _reg_tuples[regname]->addVariable("hitCode"); + _reg_tuples[regname]->addVariable("L1hitCode"); + _reg_tuples[regname]->addVariable("L2hitCode"); + } + } + + _regions.push_back(regname); + } + + // Get list of branches in tree to help protect accessing them + int nBr = tree_->GetListOfBranches()->GetEntries(); + if (debug_) std::cout << "Tree has " << nBr << " branches" << std::endl; + for(int iBr = 0; iBr < nBr; iBr++) + { + TBranch *br = dynamic_cast(tree_->GetListOfBranches()->At(iBr)); + brMap_.insert(std::map::value_type(br->GetName(), 1)); + if (debug_) std::cout << br->GetName() << ": " << brMap_[br->GetName()] << std::endl; + } + + //init Reading Tree + tree_->SetBranchAddress("EventHeader", &evth_ , &bevth_); + if (brMap_.find(tsColl_.c_str()) != brMap_.end()) tree_->SetBranchAddress(tsColl_.c_str(), &ts_ , &bts_); + tree_->SetBranchAddress(vtxColl_.c_str(), &vtxs_ , &bvtxs_); + //tree_->SetBranchAddress(hitColl_.c_str(), &hits_ , &bhits_); + if (brMap_.find(hitColl_.c_str()) != brMap_.end()) tree_->SetBranchAddress(hitColl_.c_str(), &hits_ , &bhits_); + if(!isData_ && !mcColl_.empty()) tree_->SetBranchAddress(mcColl_.c_str() , &mcParts_, &bmcParts_); + + if (not pSmearingFile_.empty()) { + smearingTool_ = std::make_shared(pSmearingFile_,true, pSmearingSeed_); + } + + if (not pBiasingFile_.empty()) { + biasingTool_ = std::make_shared(pBiasingFile_); + } } bool NewVertexAnaProcessor::process(IEvent* ievent) { - if(debug_) { - std:: cout << "----------------- Event " << evth_->getEventNumber() << " -----------------" << std::endl; - } - HpsEvent* hps_evt = (HpsEvent*) ievent; - double weight = 1.; - int run_number = evth_->getRunNumber(); - int closest_run; - if (debug_) std::cout << "Check pbc_configs" << std::endl; - if(!bpc_configs_.empty()){ - for(auto run : bpc_configs_.items()){ - int check_run = std::stoi(run.key()); - if(check_run > run_number) - break; - else{ - closest_run = check_run; - } - } - beamPosCorrections_ = {bpc_configs_[std::to_string(closest_run)]["unrotated_mean_x"], - bpc_configs_[std::to_string(closest_run)]["unrotated_mean_y"]}; - } - - //Load track parameter bias corrections if specified - if(!tbc_configs_.empty()){ - for(auto entry : tbc_configs_.items()){ - trackBiasCorrections_[entry.key()] = entry.value(); - } - } - - - //Get "true" values - //AP - double apMass = -0.9; - double apZ = -0.9; - double apEnergy = -0.9; - //Simp - double vdMass = -0.9; - double vdZ = -0.9; - double vdEnergy = -0.9; - - if (debug_) std::cout << "plot trigger info" << std::endl; - //Plot info about which trigger bits are present in the event - if (ts_ != nullptr) - { - _vtx_histos->Fill2DHisto("trig_count_hh", - ((int)ts_->prescaled.Single_3_Top)+((int)ts_->prescaled.Single_3_Bot), - ((int)ts_->prescaled.Single_2_Top)+((int)ts_->prescaled.Single_2_Bot)); - } - if (debug_) std::cout << "plot vtx N" << std::endl; - 
_vtx_histos->Fill1DHisto("n_vtx_h", vtxs_->size()); - - if (mcParts_) { - for(int i = 0; i < mcParts_->size(); i++) - { - if(mcParts_->at(i)->getPDG() == 622) - { - apMass = mcParts_->at(i)->getMass(); - apZ = mcParts_->at(i)->getVertexPosition().at(2); - apEnergy = mcParts_->at(i)->getEnergy(); - } - if(mcParts_->at(i)->getPDG() == 625) - { - vdMass = mcParts_->at(i)->getMass(); - vdZ = mcParts_->at(i)->getVertexPosition().at(2); - vdEnergy = mcParts_->at(i)->getEnergy(); - } - } - - if (!isData_) _mc_vtx_histos->FillMCParticles(mcParts_, analysis_); - } - //Store processed number of events - std::vector selected_vtxs; - bool passVtxPresel = false; - - if(debug_){ - std::cout<<"Number of vertices found in event: "<< vtxs_->size()<size(); i_vtx++ ) { - vtxSelector->getCutFlowHisto()->Fill(0.,weight); - - Vertex* vtx = vtxs_->at(i_vtx); - Particle* ele = nullptr; - Particle* pos = nullptr; - - //Trigger requirement - *really hate* having to do it here for each vertex. - - if (isData_) { - if (!vtxSelector->passCutEq("Pair1_eq",(int)evth_->isPair1Trigger(),weight)) - break; - if (!vtxSelector->passCutEq("Single0_eq",(int)evth_->isSingle0Trigger(),weight)) - break; - } - - bool foundParts = _ah->GetParticlesFromVtx(vtx,ele,pos); - if (!foundParts) { - if(debug_) std::cout<<"NewVertexAnaProcessor::WARNING::Found vtx without ele/pos. Skip."<getTrack(); - Track pos_trk = pos->getTrack(); - - if (debug_) { - std::cout<<"Check Ele/Pos Track momenta"< 0) { - biasingTool_->updateWithBiasP(ele_trk,bFieldScaleFactor_); - biasingTool_->updateWithBiasP(pos_trk,bFieldScaleFactor_); - } - - biasingTool_->updateWithBiasP(ele_trk); - biasingTool_->updateWithBiasP(pos_trk); - } - - if (debug_) { - std::cout<<"Corrected Ele/Pos Track momenta"<updateWithSmearP(ele_trk); - double pos_smf = smearingTool_->updateWithSmearP(pos_trk); - double smeared_prod = ele_trk.getP()*pos_trk.getP(); - invm_smear = sqrt(smeared_prod/unsmeared_prod); - smearingTool_->updateVertexWithSmearP(vtx, ele_smf, pos_smf); - } - //std::cout << "[Before Preselection] ele track p after smearing: " << ele_trk.getP() << std::endl; - - - //Add the momenta to the tracks - do not do that - //ele_trk.setMomentum(ele->getMomentum()[0],ele->getMomentum()[1],ele->getMomentum()[2]); - //pos_trk.setMomentum(pos->getMomentum()[0],pos->getMomentum()[1],pos->getMomentum()[2]); - - double ele_E = ele->getEnergy(); - double pos_E = pos->getEnergy(); - if (debug_) std::cout << "got tracks" << std::endl; - - CalCluster eleClus = ele->getCluster(); - CalCluster posClus = pos->getCluster(); - - - //Compute analysis variables here. 
- TLorentzVector p_ele; - p_ele.SetPxPyPzE(ele_trk.getMomentum()[0],ele_trk.getMomentum()[1],ele_trk.getMomentum()[2],ele->getEnergy()); - TLorentzVector p_pos; - p_pos.SetPxPyPzE(pos_trk.getMomentum()[0],pos_trk.getMomentum()[1],pos_trk.getMomentum()[2],ele->getEnergy()); - - //Tracks in opposite volumes - useless - //if (!vtxSelector->passCutLt("eleposTanLambaProd_lt",ele_trk.getTanLambda() * pos_trk.getTanLambda(),weight)) - // continue; - - if (debug_) std::cout << "start selection" << std::endl; - //Ele Track Time - if (!vtxSelector->passCutLt("eleTrkTime_lt",fabs(ele_trk.getTrackTime()),weight)) - continue; - - //Pos Track Time - if (!vtxSelector->passCutLt("posTrkTime_lt",fabs(pos_trk.getTrackTime()),weight)) - continue; - - //Ele Track-cluster match - if (!vtxSelector->passCutLt("eleTrkCluMatch_lt",ele->getGoodnessOfPID(),weight)) - continue; - - //Pos Track-cluster match - if (!vtxSelector->passCutLt("posTrkCluMatch_lt",pos->getGoodnessOfPID(),weight)) - continue; - - //Require Positron Cluster exists - if (!vtxSelector->passCutGt("posClusE_gt",posClus.getEnergy(),weight)) - continue; - - //Require Positron Cluster does NOT exists - if (!vtxSelector->passCutLt("posClusE_lt",posClus.getEnergy(),weight)) - continue; - - - double corr_eleClusterTime = ele->getCluster().getTime() - timeOffset_; - double corr_posClusterTime = pos->getCluster().getTime() - timeOffset_; - - double botClusTime = 0.0; - if(ele->getCluster().getPosition().at(1) < 0.0) botClusTime = ele->getCluster().getTime(); - else botClusTime = pos->getCluster().getTime(); - - //Bottom Cluster Time - if (!vtxSelector->passCutLt("botCluTime_lt", botClusTime, weight)) - continue; + if(debug_) { + std:: cout << "----------------- Event " << evth_->getEventNumber() << " -----------------" << std::endl; + } + + HpsEvent* hps_evt = (HpsEvent*) ievent; + double weight = 1.; + int run_number = evth_->getRunNumber(); + int closest_run; + if (debug_) std::cout << "Check pbc_configs" << std::endl; + if(!bpc_configs_.empty()){ + for(auto run : bpc_configs_.items()){ + int check_run = std::stoi(run.key()); + if(check_run > run_number) + break; + else{ + closest_run = check_run; + } + } + beamPosCorrections_ = {bpc_configs_[std::to_string(closest_run)]["unrotated_mean_x"], + bpc_configs_[std::to_string(closest_run)]["unrotated_mean_y"]}; + } + + //Load track parameter bias corrections if specified + if(!tbc_configs_.empty()){ + for(auto entry : tbc_configs_.items()){ + trackBiasCorrections_[entry.key()] = entry.value(); + } + } + + + //Get "true" values + //AP + double apMass = -0.9; + double apZ = -0.9; + double apEnergy = -0.9; + //Simp + double vdMass = -0.9; + double vdZ = -0.9; + double vdEnergy = -0.9; + + if (debug_) std::cout << "plot trigger info" << std::endl; + //Plot info about which trigger bits are present in the event + if (ts_ != nullptr) + { + _vtx_histos->Fill2DHisto("trig_count_hh", + ((int)ts_->prescaled.Single_3_Top)+((int)ts_->prescaled.Single_3_Bot), + ((int)ts_->prescaled.Single_2_Top)+((int)ts_->prescaled.Single_2_Bot)); + } + if (debug_) std::cout << "plot vtx N" << std::endl; + _vtx_histos->Fill1DHisto("n_vtx_h", vtxs_->size()); + + if (mcParts_) { + for(int i = 0; i < mcParts_->size(); i++) + { + if(mcParts_->at(i)->getPDG() == 622) + { + apMass = mcParts_->at(i)->getMass(); + apZ = mcParts_->at(i)->getVertexPosition().at(2); + apEnergy = mcParts_->at(i)->getEnergy(); + } + if(mcParts_->at(i)->getPDG() == 625) + { + vdMass = mcParts_->at(i)->getMass(); + vdZ = mcParts_->at(i)->getVertexPosition().at(2); + 
vdEnergy = mcParts_->at(i)->getEnergy(); + } + } + + if (!isData_) _mc_vtx_histos->FillMCParticles(mcParts_, analysis_); + } + //Store processed number of events + std::vector selected_vtxs; + bool passVtxPresel = false; + + if(debug_){ + std::cout<<"Number of vertices found in event: "<< vtxs_->size()<size(); i_vtx++ ) { + vtxSelector->getCutFlowHisto()->Fill(0.,weight); + + Vertex* vtx = vtxs_->at(i_vtx); + Particle* ele = nullptr; + Particle* pos = nullptr; + + //Trigger requirement - *really hate* having to do it here for each vertex. + + if (isData_) { + if (!vtxSelector->passCutEq("Pair1_eq",(int)evth_->isPair1Trigger(),weight)) + break; + if (!vtxSelector->passCutEq("Single0_eq",(int)evth_->isSingle0Trigger(),weight)) + break; + } + + //05072023 Test Cam's hypothesis. My understanding is that signal is injected every 500ns (250 events), so + //Triggered event time mod 500 should be ~0. + //MC beam background is inserted uniformly in all the events. + //Cam and Sarah find that, looking at triggered pulser data, the triggered events fall off in time away from event t mod 500 + //However, the MC beam is uniform across this distribution, insinuating that there are way more triggered backgrounds in MC + //than we actually expect in data...I think... + + if(!isData_){ + if (!vtxSelector->passCutLt("evtTimeMod500_lt", fabs(evth_->getEventTime()%500),weight)) + break; + } + + + bool foundParts = _ah->GetParticlesFromVtx(vtx,ele,pos); + if (!foundParts) { + if(debug_) std::cout<<"NewVertexAnaProcessor::WARNING::Found vtx without ele/pos. Skip."<getTrack(); + Track pos_trk = pos->getTrack(); + + if (debug_) { + std::cout<<"Check Ele/Pos Track momenta"< 0) { + biasingTool_->updateWithBiasP(ele_trk,bFieldScaleFactor_); + biasingTool_->updateWithBiasP(pos_trk,bFieldScaleFactor_); + } + + biasingTool_->updateWithBiasP(ele_trk); + biasingTool_->updateWithBiasP(pos_trk); + } + + if (debug_) { + std::cout<<"Corrected Ele/Pos Track momenta"<updateWithSmearP(ele_trk); + double pos_smf = smearingTool_->updateWithSmearP(pos_trk); + double smeared_prod = ele_trk.getP()*pos_trk.getP(); + invm_smear = sqrt(smeared_prod/unsmeared_prod); + smearingTool_->updateVertexWithSmearP(vtx, ele_smf, pos_smf); + } + //std::cout << "[Before Preselection] ele track p after smearing: " << ele_trk.getP() << std::endl; + + + //Add the momenta to the tracks - do not do that + //ele_trk.setMomentum(ele->getMomentum()[0],ele->getMomentum()[1],ele->getMomentum()[2]); + //pos_trk.setMomentum(pos->getMomentum()[0],pos->getMomentum()[1],pos->getMomentum()[2]); + + double ele_E = ele->getEnergy(); + double pos_E = pos->getEnergy(); + if (debug_) std::cout << "got tracks" << std::endl; + + CalCluster eleClus = ele->getCluster(); + CalCluster posClus = pos->getCluster(); + + + //Compute analysis variables here. 
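            // The smearing block above rescales each track momentum by a factor
            // f = p_smeared/p_unsmeared, and invm_smear = sqrt(f_ele*f_pos) is the matching
            // rescaling of the vertex invariant mass: for an e+e- pair with negligible
            // electron mass, m^2 ~ 2*p_ele*p_pos*(1 - cos(theta)), so scaling the two momenta
            // scales m^2 by f_ele*f_pos and m by sqrt(f_ele*f_pos), the same factor applied
            // to the vertex mass in updateVertexWithSmearP. A minimal standalone sketch of
            // that scaling (hypothetical helper, for illustration only):
            //
            //   #include <cmath>
            //   double smearedMass(double m_unsmeared, double f_ele, double f_pos) {
            //       return m_unsmeared * std::sqrt(f_ele * f_pos);  // same factor as invm_smear
            //   }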
+ TLorentzVector p_ele; + p_ele.SetPxPyPzE(ele_trk.getMomentum()[0],ele_trk.getMomentum()[1],ele_trk.getMomentum()[2],ele->getEnergy()); + TLorentzVector p_pos; + p_pos.SetPxPyPzE(pos_trk.getMomentum()[0],pos_trk.getMomentum()[1],pos_trk.getMomentum()[2],ele->getEnergy()); + + //Tracks in opposite volumes - useless + //if (!vtxSelector->passCutLt("eleposTanLambaProd_lt",ele_trk.getTanLambda() * pos_trk.getTanLambda(),weight)) + // continue; + + if (debug_) std::cout << "start selection" << std::endl; + //Ele Track Time + if (!vtxSelector->passCutLt("eleTrkTime_lt",fabs(ele_trk.getTrackTime()),weight)) + continue; + + //Pos Track Time + if (!vtxSelector->passCutLt("posTrkTime_lt",fabs(pos_trk.getTrackTime()),weight)) + continue; + + //Ele Track-cluster match + if (!vtxSelector->passCutLt("eleTrkCluMatch_lt",ele->getGoodnessOfPID(),weight)) + continue; + + //Pos Track-cluster match + if (!vtxSelector->passCutLt("posTrkCluMatch_lt",pos->getGoodnessOfPID(),weight)) + continue; + + //Require Positron Cluster exists + if (!vtxSelector->passCutGt("posClusE_gt",posClus.getEnergy(),weight)) + continue; + + //Require Positron Cluster does NOT exists + if (!vtxSelector->passCutLt("posClusE_lt",posClus.getEnergy(),weight)) + continue; + + + double corr_eleClusterTime = ele->getCluster().getTime() - timeOffset_; + double corr_posClusterTime = pos->getCluster().getTime() - timeOffset_; + + double botClusTime = 0.0; + if(ele->getCluster().getPosition().at(1) < 0.0) botClusTime = ele->getCluster().getTime(); + else botClusTime = pos->getCluster().getTime(); + + //Bottom Cluster Time + if (!vtxSelector->passCutLt("botCluTime_lt", botClusTime, weight)) + continue; - if (!vtxSelector->passCutGt("botCluTime_gt", botClusTime, weight)) - continue; + if (!vtxSelector->passCutGt("botCluTime_gt", botClusTime, weight)) + continue; - //Ele Pos Cluster Time Difference - if (!vtxSelector->passCutLt("eleposCluTimeDiff_lt",fabs(corr_eleClusterTime - corr_posClusterTime),weight)) - continue; + //Ele Pos Cluster Time Difference + if (!vtxSelector->passCutLt("eleposCluTimeDiff_lt",fabs(corr_eleClusterTime - corr_posClusterTime),weight)) + continue; - //Ele Track-Cluster Time Difference - if (!vtxSelector->passCutLt("eleTrkCluTimeDiff_lt",fabs(ele_trk.getTrackTime() - corr_eleClusterTime),weight)) - continue; + //Ele Track-Cluster Time Difference + if (!vtxSelector->passCutLt("eleTrkCluTimeDiff_lt",fabs(ele_trk.getTrackTime() - corr_eleClusterTime),weight)) + continue; - //Pos Track-Cluster Time Difference - if (!vtxSelector->passCutLt("posTrkCluTimeDiff_lt",fabs(pos_trk.getTrackTime() - corr_posClusterTime),weight)) - continue; + //Pos Track-Cluster Time Difference + if (!vtxSelector->passCutLt("posTrkCluTimeDiff_lt",fabs(pos_trk.getTrackTime() - corr_posClusterTime),weight)) + continue; - TVector3 ele_mom; - //ele_mom.SetX(ele->getMomentum()[0]); - //ele_mom.SetY(ele->getMomentum()[1]); - //ele_mom.SetZ(ele->getMomentum()[2]); - ele_mom.SetX(ele_trk.getMomentum()[0]); - ele_mom.SetY(ele_trk.getMomentum()[1]); - ele_mom.SetZ(ele_trk.getMomentum()[2]); - - - TVector3 pos_mom; - //pos_mom.SetX(pos->getMomentum()[0]); - //pos_mom.SetY(pos->getMomentum()[1]); - //pos_mom.SetZ(pos->getMomentum()[2]); - pos_mom.SetX(pos_trk.getMomentum()[0]); - pos_mom.SetY(pos_trk.getMomentum()[1]); - pos_mom.SetZ(pos_trk.getMomentum()[2]); - - - - //Ele Track Quality - Chi2 - if (!vtxSelector->passCutLt("eleTrkChi2_lt",ele_trk.getChi2(),weight)) - continue; - - //Pos Track Quality - Chi2 - if 
(!vtxSelector->passCutLt("posTrkChi2_lt",pos_trk.getChi2(),weight)) - continue; - - //Ele Track Quality - Chi2Ndf - if (!vtxSelector->passCutLt("eleTrkChi2Ndf_lt",ele_trk.getChi2Ndf(),weight)) - continue; - - //Pos Track Quality - Chi2Ndf - if (!vtxSelector->passCutLt("posTrkChi2Ndf_lt",pos_trk.getChi2Ndf(),weight)) - continue; - - //Beam Electron cut - if (!vtxSelector->passCutLt("eleMom_lt",ele_mom.Mag(),weight)) - continue; - - //Ele min momentum cut - if (!vtxSelector->passCutGt("eleMom_gt",ele_mom.Mag(),weight)) - continue; - - //Pos min momentum cut - if (!vtxSelector->passCutGt("posMom_gt",pos_mom.Mag(),weight)) - continue; - - //Ele nHits - int ele2dHits = ele_trk.getTrackerHitCount(); - if (!ele_trk.isKalmanTrack()) - ele2dHits*=2; - - if (!vtxSelector->passCutGt("eleN2Dhits_gt",ele2dHits,weight)) { - continue; - } - - //Pos nHits - int pos2dHits = pos_trk.getTrackerHitCount(); - if (!pos_trk.isKalmanTrack()) - pos2dHits*=2; - - if (!vtxSelector->passCutGt("posN2Dhits_gt",pos2dHits,weight)) { - continue; - } - - //Less than 4 shared hits for ele/pos track - if (!vtxSelector->passCutLt("eleNshared_lt",ele_trk.getNShared(),weight)) { - continue; - } - - if (!vtxSelector->passCutLt("posNshared_lt",pos_trk.getNShared(),weight)) { - continue; - } - - - //Vertex Quality - if (!vtxSelector->passCutLt("chi2unc_lt",vtx->getChi2(),weight)) - continue; - - //Max vtx momentum - if (!vtxSelector->passCutLt("maxVtxMom_lt",(ele_mom+pos_mom).Mag(),weight)) - continue; - - //Min vtx momentum + TVector3 ele_mom; + //ele_mom.SetX(ele->getMomentum()[0]); + //ele_mom.SetY(ele->getMomentum()[1]); + //ele_mom.SetZ(ele->getMomentum()[2]); + ele_mom.SetX(ele_trk.getMomentum()[0]); + ele_mom.SetY(ele_trk.getMomentum()[1]); + ele_mom.SetZ(ele_trk.getMomentum()[2]); + + + TVector3 pos_mom; + //pos_mom.SetX(pos->getMomentum()[0]); + //pos_mom.SetY(pos->getMomentum()[1]); + //pos_mom.SetZ(pos->getMomentum()[2]); + pos_mom.SetX(pos_trk.getMomentum()[0]); + pos_mom.SetY(pos_trk.getMomentum()[1]); + pos_mom.SetZ(pos_trk.getMomentum()[2]); + + + + //Ele Track Quality - Chi2 + if (!vtxSelector->passCutLt("eleTrkChi2_lt",ele_trk.getChi2(),weight)) + continue; + + //Pos Track Quality - Chi2 + if (!vtxSelector->passCutLt("posTrkChi2_lt",pos_trk.getChi2(),weight)) + continue; + + //Ele Track Quality - Chi2Ndf + if (!vtxSelector->passCutLt("eleTrkChi2Ndf_lt",ele_trk.getChi2Ndf(),weight)) + continue; + + //Pos Track Quality - Chi2Ndf + if (!vtxSelector->passCutLt("posTrkChi2Ndf_lt",pos_trk.getChi2Ndf(),weight)) + continue; + + //Beam Electron cut + if (!vtxSelector->passCutLt("eleMom_lt",ele_mom.Mag(),weight)) + continue; + + //Ele min momentum cut + if (!vtxSelector->passCutGt("eleMom_gt",ele_mom.Mag(),weight)) + continue; + + //Pos min momentum cut + if (!vtxSelector->passCutGt("posMom_gt",pos_mom.Mag(),weight)) + continue; + + //Ele nHits + int ele2dHits = ele_trk.getTrackerHitCount(); + if (!ele_trk.isKalmanTrack()) + ele2dHits*=2; + + if (!vtxSelector->passCutGt("eleN2Dhits_gt",ele2dHits,weight)) { + continue; + } + + //Pos nHits + int pos2dHits = pos_trk.getTrackerHitCount(); + if (!pos_trk.isKalmanTrack()) + pos2dHits*=2; + + if (!vtxSelector->passCutGt("posN2Dhits_gt",pos2dHits,weight)) { + continue; + } + + //Less than 4 shared hits for ele/pos track + if (!vtxSelector->passCutLt("eleNshared_lt",ele_trk.getNShared(),weight)) { + continue; + } + + if (!vtxSelector->passCutLt("posNshared_lt",pos_trk.getNShared(),weight)) { + continue; + } + + + //Vertex Quality + if 
(!vtxSelector->passCutLt("chi2unc_lt",vtx->getChi2(),weight)) + continue; + + //Max vtx momentum + if (!vtxSelector->passCutLt("maxVtxMom_lt",(ele_mom+pos_mom).Mag(),weight)) + continue; + + //Min vtx momentum - if (!vtxSelector->passCutGt("minVtxMom_gt",(ele_mom+pos_mom).Mag(),weight)) - continue; + if (!vtxSelector->passCutGt("minVtxMom_gt",(ele_mom+pos_mom).Mag(),weight)) + continue; - if (debug_) std::cout << "fill 1D Vertex" << std::endl; - _vtx_histos->Fill1DVertex(vtx, - ele, - pos, - &ele_trk, - &pos_trk, - weight); + if (debug_) std::cout << "fill 1D Vertex" << std::endl; + _vtx_histos->Fill1DVertex(vtx, + ele, + pos, + &ele_trk, + &pos_trk, + weight); - if (debug_) std::cout << "fill track histos" << std::endl; - double ele_pos_dt = corr_eleClusterTime - corr_posClusterTime; - double psum = ele_mom.Mag()+pos_mom.Mag(); + if (debug_) std::cout << "fill track histos" << std::endl; + double ele_pos_dt = corr_eleClusterTime - corr_posClusterTime; + double psum = ele_mom.Mag()+pos_mom.Mag(); - _vtx_histos->Fill1DTrack(&ele_trk,weight, "ele_"); - _vtx_histos->Fill1DTrack(&pos_trk,weight, "pos_"); - _vtx_histos->Fill1DHisto("ele_track_n2dhits_h", ele2dHits, weight); - _vtx_histos->Fill1DHisto("pos_track_n2dhits_h", pos2dHits, weight); - _vtx_histos->Fill1DHisto("vtx_Psum_h", p_ele.P()+p_pos.P(), weight); - _vtx_histos->Fill1DHisto("vtx_Esum_h", ele_E + pos_E, weight); - _vtx_histos->Fill1DHisto("vtx_smear_InvM_h", invm_smear*(vtx->getInvMass()), weight); - _vtx_histos->Fill1DHisto("ele_pos_clusTimeDiff_h", (corr_eleClusterTime - corr_posClusterTime), weight); - _vtx_histos->Fill2DHisto("ele_vtxZ_iso_hh", TMath::Min(ele_trk.getIsolation(0), ele_trk.getIsolation(1)), vtx->getZ(), weight); - _vtx_histos->Fill2DHisto("pos_vtxZ_iso_hh", TMath::Min(pos_trk.getIsolation(0), pos_trk.getIsolation(1)), vtx->getZ(), weight); - _vtx_histos->Fill2DHistograms(vtx,weight); - _vtx_histos->Fill2DTrack(&ele_trk,weight,"ele_"); - _vtx_histos->Fill2DTrack(&pos_trk,weight,"pos_"); - _vtx_histos->Fill1DHisto("mcMass622_h",apMass); - _vtx_histos->Fill1DHisto("mcZ622_h",apZ); + _vtx_histos->Fill1DTrack(&ele_trk,weight, "ele_"); + _vtx_histos->Fill1DTrack(&pos_trk,weight, "pos_"); + _vtx_histos->Fill1DHisto("ele_track_n2dhits_h", ele2dHits, weight); + _vtx_histos->Fill1DHisto("pos_track_n2dhits_h", pos2dHits, weight); + _vtx_histos->Fill1DHisto("vtx_Psum_h", p_ele.P()+p_pos.P(), weight); + _vtx_histos->Fill1DHisto("vtx_Esum_h", ele_E + pos_E, weight); + _vtx_histos->Fill1DHisto("vtx_smear_InvM_h", invm_smear*(vtx->getInvMass()), weight); + _vtx_histos->Fill1DHisto("ele_pos_clusTimeDiff_h", (corr_eleClusterTime - corr_posClusterTime), weight); + _vtx_histos->Fill2DHisto("ele_vtxZ_iso_hh", TMath::Min(ele_trk.getIsolation(0), ele_trk.getIsolation(1)), vtx->getZ(), weight); + _vtx_histos->Fill2DHisto("pos_vtxZ_iso_hh", TMath::Min(pos_trk.getIsolation(0), pos_trk.getIsolation(1)), vtx->getZ(), weight); + _vtx_histos->Fill2DHistograms(vtx,weight); + _vtx_histos->Fill2DTrack(&ele_trk,weight,"ele_"); + _vtx_histos->Fill2DTrack(&pos_trk,weight,"pos_"); + _vtx_histos->Fill1DHisto("mcMass622_h",apMass); + _vtx_histos->Fill1DHisto("mcZ622_h",apZ); - //New SIMP histos for developing loose preselection cuts - //2d histos - _vtx_histos->Fill2DHisto("ele_clusT_v_ele_trackT_hh", ele_trk.getTrackTime(), corr_eleClusterTime, weight); - _vtx_histos->Fill2DHisto("pos_clusT_v_pos_trackT_hh", pos_trk.getTrackTime(), corr_posClusterTime, weight); - _vtx_histos->Fill2DHisto("ele_track_time_v_P_hh", ele_trk.getP(), 
ele_trk.getTrackTime(), weight); - _vtx_histos->Fill2DHisto("pos_track_time_v_P_hh", pos_trk.getP(), pos_trk.getTrackTime(), weight); - _vtx_histos->Fill2DHisto("ele_pos_clusTimeDiff_v_pSum_hh",ele_mom.Mag()+pos_mom.Mag(), ele_pos_dt, weight); - _vtx_histos->Fill2DHisto("ele_cluster_energy_v_track_p_hh",ele_trk.getP(), eleClus.getEnergy(), weight); - _vtx_histos->Fill2DHisto("pos_cluster_energy_v_track_p_hh",pos_trk.getP(), posClus.getEnergy(), weight); - _vtx_histos->Fill2DHisto("ele_track_cluster_dt_v_EoverP_hh",eleClus.getEnergy()/ele_trk.getP(), ele_trk.getTrackTime() - corr_eleClusterTime, weight); - _vtx_histos->Fill2DHisto("pos_track_cluster_dt_v_EoverP_hh",posClus.getEnergy()/pos_trk.getP(), pos_trk.getTrackTime() - corr_posClusterTime, weight); - _vtx_histos->Fill2DHisto("ele_track_clus_dt_v_p_hh",ele_trk.getP(), ele_trk.getTrackTime() - corr_eleClusterTime, weight); - _vtx_histos->Fill2DHisto("pos_track_clus_dt_v_p_hh",pos_trk.getP(), pos_trk.getTrackTime() - corr_posClusterTime, weight); - //chi2 2d plots - _vtx_histos->Fill2DHisto("ele_track_chi2ndf_v_time_hh", ele_trk.getTrackTime(), ele_trk.getChi2Ndf(), weight); - _vtx_histos->Fill2DHisto("ele_track_chi2ndf_v_p_hh", ele_trk.getP(), ele_trk.getChi2Ndf(), weight); - _vtx_histos->Fill2DHisto("ele_track_chi2ndf_v_tanlambda_hh", ele_trk.getTanLambda(), ele_trk.getChi2Ndf(), weight); - _vtx_histos->Fill2DHisto("ele_track_chi2ndf_v_n2dhits_hh", ele2dHits, ele_trk.getChi2Ndf(), weight); - - _vtx_histos->Fill2DHisto("pos_track_chi2ndf_v_time_hh", pos_trk.getTrackTime(), pos_trk.getChi2Ndf(), weight); - _vtx_histos->Fill2DHisto("pos_track_chi2ndf_v_p_hh", pos_trk.getP(), pos_trk.getChi2Ndf(), weight); - _vtx_histos->Fill2DHisto("pos_track_chi2ndf_v_tanlambda_hh", pos_trk.getTanLambda(), pos_trk.getChi2Ndf(), weight); - _vtx_histos->Fill2DHisto("pos_track_chi2ndf_v_n2dhits_hh", pos2dHits, pos_trk.getChi2Ndf(), weight); - - _vtx_histos->Fill2DHisto("ele_track_p_v_tanlambda_hh", ele_trk.getTanLambda(), ele_trk.getP(), weight); - _vtx_histos->Fill2DHisto("pos_track_p_v_tanlambda_hh", pos_trk.getTanLambda(), pos_trk.getP(), weight); - - - //1d histos - _vtx_histos->Fill1DHisto("ele_track_clus_dt_h", ele_trk.getTrackTime() - corr_eleClusterTime, weight); - _vtx_histos->Fill1DHisto("pos_track_clus_dt_h", pos_trk.getTrackTime() - corr_posClusterTime, weight); - - passVtxPresel = true; - - selected_vtxs.push_back(vtx); - vtxSelector->clearSelector(); - } - - // std::cout << "Number of selected vtxs: " << selected_vtxs.size() << std::endl; - - _vtx_histos->Fill1DHisto("n_vertices_h",selected_vtxs.size()); - - - //not working atm - //hps_evt->addVertexCollection("selected_vtxs", selected_vtxs); - - //Make Plots for each region: loop on each region. Check if the region has the cut and apply it - //TODO Clean this up => Cuts should be implemented in each region? - //TODO Bring the preselection out of this stupid loop - - - - if (debug_) std::cout << "start regions" << std::endl; - //TODO add yields. => Quite terrible way to loop. - for (auto region : _regions ) { - - int nGoodVtx = 0; - Vertex* goodVtx = nullptr; - std::vector goodVtxs; - - float truePsum = -1; - float trueEsum = -1; - - for ( auto vtx : selected_vtxs) { - - //No cuts. 
- _reg_vtx_selectors[region]->getCutFlowHisto()->Fill(0.,weight); - - - Particle* ele = nullptr; - Particle* pos = nullptr; - - _ah->GetParticlesFromVtx(vtx,ele,pos); - - CalCluster eleClus = ele->getCluster(); - CalCluster posClus = pos->getCluster(); - //vtx X position - if (!_reg_vtx_selectors[region]->passCutLt("uncVtxX_lt",fabs(vtx->getX()),weight)) - continue; - - //vtx Y position - if (!_reg_vtx_selectors[region]->passCutLt("uncVtxY_lt",fabs(vtx->getY()),weight)) - continue; - - //vtx Z position - if (!_reg_vtx_selectors[region]->passCutGt("uncVtxZ_gt",vtx->getZ(),weight)) - continue; - - double ele_E = ele->getEnergy(); - double pos_E = pos->getEnergy(); - - //Compute analysis variables here. - - Track ele_trk = ele->getTrack(); - Track pos_trk = pos->getTrack(); - - //Apply Track Bias Corrections - for (const auto& pair : trackBiasCorrections_){ - ele_trk.applyCorrection(pair.first, pair.second); - pos_trk.applyCorrection(pair.first, pair.second); - } - - /* - //Beam Position Corrections - ele_trk.applyCorrection("z0", beamPosCorrections_.at(1)); - pos_trk.applyCorrection("z0", beamPosCorrections_.at(1)); - //Track Time Corrections - ele_trk.applyCorrection("track_time",eleTrackTimeBias_); - pos_trk.applyCorrection("track_time", posTrackTimeBias_); - */ - if (biasingTool_) { - - //Correct the wrong Bfield first - if (bFieldScaleFactor_ > 0) { - biasingTool_->updateWithBiasP(ele_trk,bFieldScaleFactor_); - biasingTool_->updateWithBiasP(pos_trk,bFieldScaleFactor_); - } - - biasingTool_->updateWithBiasP(ele_trk); - biasingTool_->updateWithBiasP(pos_trk); - } - - double invm_smear = 1.; - //std::cout << "[Region loop Vtxs] ele track p before smearing: " << ele_trk.getP() << std::endl; - if (smearingTool_) { - double unsmeared_prod = ele_trk.getP()*pos_trk.getP(); - double ele_smf = smearingTool_->updateWithSmearP(ele_trk); - double pos_smf = smearingTool_->updateWithSmearP(pos_trk); - double smeared_prod = ele_trk.getP()*pos_trk.getP(); - invm_smear = sqrt(smeared_prod/unsmeared_prod); - smearingTool_->updateVertexWithSmearP(vtx, ele_smf, pos_smf); - } - //std::cout << "[Region loop vtxs] ele track p after smearing: " << ele_trk.getP() << std::endl; - - //Add the momenta to the tracks - //ele_trk.setMomentum(ele->getMomentum()[0],ele->getMomentum()[1],ele->getMomentum()[2]); - //pos_trk.setMomentum(pos->getMomentum()[0],pos->getMomentum()[1],pos->getMomentum()[2]); - TVector3 recEleP(ele->getMomentum()[0],ele->getMomentum()[1],ele->getMomentum()[2]); - TLorentzVector p_ele; - p_ele.SetPxPyPzE(ele_trk.getMomentum()[0],ele_trk.getMomentum()[1],ele_trk.getMomentum()[2], ele_E); - TLorentzVector p_pos; - p_pos.SetPxPyPzE(pos_trk.getMomentum()[0],pos_trk.getMomentum()[1],pos_trk.getMomentum()[2], pos_E); - - //Get the layers hit on each track - std::vector ele_hit_layers = ele_trk.getHitLayers(); - int ele_Si0 = 0; - int ele_Si1 = 0; - int ele_lastlayer = 0; - for(int i=0; i pos_hit_layers = pos_trk.getHitLayers(); - int pos_Si0 = 0; - int pos_Si1 = 0; - int pos_lastlayer = 0; - for(int i=0; iInnermostLayerCheck(&ele_trk, foundL1ele, foundL2ele); - - - if (debug_) { - std::cout<<"Check on pos_Track"<InnermostLayerCheck(&pos_trk, foundL1pos, foundL2pos); - - if (debug_) { - std::cout<<"Check on pos_Track"<passCutEq("Pair1_eq",(int)evth_->isPair1Trigger(),weight)) - break; - } - //Ele Track Time - if (!_reg_vtx_selectors[region]->passCutLt("eleTrkTime_lt",fabs(ele_trk.getTrackTime()),weight)) - continue; - - //Pos Track Time - if 
(!_reg_vtx_selectors[region]->passCutLt("posTrkTime_lt",fabs(pos_trk.getTrackTime()),weight)) - continue; - - //Ele Track-cluster match - if (!_reg_vtx_selectors[region]->passCutLt("eleTrkCluMatch_lt",ele->getGoodnessOfPID(),weight)) - continue; - - //Pos Track-cluster match - if (!_reg_vtx_selectors[region]->passCutLt("posTrkCluMatch_lt",pos->getGoodnessOfPID(),weight)) - continue; - - //Require Positron Cluster exists - if (!_reg_vtx_selectors[region]->passCutGt("posClusE_gt",posClus.getEnergy(),weight)) - continue; - - //Require Positron Cluster does NOT exists - if (!_reg_vtx_selectors[region]->passCutLt("posClusE_lt",posClus.getEnergy(),weight)) - continue; - - double corr_eleClusterTime = ele->getCluster().getTime() - timeOffset_; - double corr_posClusterTime = pos->getCluster().getTime() - timeOffset_; - - double botClusTime = 0.0; - if(ele->getCluster().getPosition().at(1) < 0.0) botClusTime = ele->getCluster().getTime(); - else botClusTime = pos->getCluster().getTime(); - - //Bottom Cluster Time - if (!_reg_vtx_selectors[region]->passCutLt("botCluTime_lt", botClusTime, weight)) - continue; - - if (!_reg_vtx_selectors[region]->passCutGt("botCluTime_gt", botClusTime, weight)) - continue; - - //Ele Pos Cluster Time Difference - if (!_reg_vtx_selectors[region]->passCutLt("eleposCluTimeDiff_lt",fabs(corr_eleClusterTime - corr_posClusterTime),weight)) - continue; - - //Ele Track-Cluster Time Difference - if (!_reg_vtx_selectors[region]->passCutLt("eleTrkCluTimeDiff_lt",fabs(ele_trk.getTrackTime() - corr_eleClusterTime),weight)) - continue; - - //Pos Track-Cluster Time Difference - if (!_reg_vtx_selectors[region]->passCutLt("posTrkCluTimeDiff_lt",fabs(pos_trk.getTrackTime() - corr_posClusterTime),weight)) - continue; - - TVector3 ele_mom; - ele_mom.SetX(ele_trk.getMomentum()[0]); - ele_mom.SetY(ele_trk.getMomentum()[1]); - ele_mom.SetZ(ele_trk.getMomentum()[2]); - - - TVector3 pos_mom; - pos_mom.SetX(pos_trk.getMomentum()[0]); - pos_mom.SetY(pos_trk.getMomentum()[1]); - pos_mom.SetZ(pos_trk.getMomentum()[2]); - - //Ele Track Quality - Chi2 - if (!_reg_vtx_selectors[region]->passCutLt("eleTrkChi2_lt",ele_trk.getChi2(),weight)) - continue; + //New SIMP histos for developing loose preselection cuts + //2d histos + _vtx_histos->Fill2DHisto("ele_clusT_v_ele_trackT_hh", ele_trk.getTrackTime(), corr_eleClusterTime, weight); + _vtx_histos->Fill2DHisto("pos_clusT_v_pos_trackT_hh", pos_trk.getTrackTime(), corr_posClusterTime, weight); + _vtx_histos->Fill2DHisto("ele_track_time_v_P_hh", ele_trk.getP(), ele_trk.getTrackTime(), weight); + _vtx_histos->Fill2DHisto("pos_track_time_v_P_hh", pos_trk.getP(), pos_trk.getTrackTime(), weight); + _vtx_histos->Fill2DHisto("ele_pos_clusTimeDiff_v_pSum_hh",ele_mom.Mag()+pos_mom.Mag(), ele_pos_dt, weight); + _vtx_histos->Fill2DHisto("ele_cluster_energy_v_track_p_hh",ele_trk.getP(), eleClus.getEnergy(), weight); + _vtx_histos->Fill2DHisto("pos_cluster_energy_v_track_p_hh",pos_trk.getP(), posClus.getEnergy(), weight); + _vtx_histos->Fill2DHisto("ele_track_cluster_dt_v_EoverP_hh",eleClus.getEnergy()/ele_trk.getP(), ele_trk.getTrackTime() - corr_eleClusterTime, weight); + _vtx_histos->Fill2DHisto("pos_track_cluster_dt_v_EoverP_hh",posClus.getEnergy()/pos_trk.getP(), pos_trk.getTrackTime() - corr_posClusterTime, weight); + _vtx_histos->Fill2DHisto("ele_track_clus_dt_v_p_hh",ele_trk.getP(), ele_trk.getTrackTime() - corr_eleClusterTime, weight); + _vtx_histos->Fill2DHisto("pos_track_clus_dt_v_p_hh",pos_trk.getP(), pos_trk.getTrackTime() - corr_posClusterTime, weight); + 
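The loose-preselection histograms filled above are keyed on the track-cluster time difference after the cluster time has been shifted by the global offset (corrected time = raw cluster time - timeOffset_). A minimal sketch of that quantity, with illustrative numbers and names standing in for the processor members:

#include <cstdio>

// Stand-alone illustration of the corrected track-cluster time difference
// filled above; timeOffset is a placeholder for the processor's timeOffset_.
double trackClusterDt(double trackTime, double rawClusterTime, double timeOffset) {
    const double corrClusterTime = rawClusterTime - timeOffset;   // shifted cluster time
    return trackTime - corrClusterTime;                           // dt entering cuts and histograms
}

int main() {
    // Illustrative numbers only.
    std::printf("dt = %.2f ns\n", trackClusterDt(2.0, 58.0, 56.0));
    return 0;
}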
//chi2 2d plots + _vtx_histos->Fill2DHisto("ele_track_chi2ndf_v_time_hh", ele_trk.getTrackTime(), ele_trk.getChi2Ndf(), weight); + _vtx_histos->Fill2DHisto("ele_track_chi2ndf_v_p_hh", ele_trk.getP(), ele_trk.getChi2Ndf(), weight); + _vtx_histos->Fill2DHisto("ele_track_chi2ndf_v_tanlambda_hh", ele_trk.getTanLambda(), ele_trk.getChi2Ndf(), weight); + _vtx_histos->Fill2DHisto("ele_track_chi2ndf_v_n2dhits_hh", ele2dHits, ele_trk.getChi2Ndf(), weight); + + _vtx_histos->Fill2DHisto("pos_track_chi2ndf_v_time_hh", pos_trk.getTrackTime(), pos_trk.getChi2Ndf(), weight); + _vtx_histos->Fill2DHisto("pos_track_chi2ndf_v_p_hh", pos_trk.getP(), pos_trk.getChi2Ndf(), weight); + _vtx_histos->Fill2DHisto("pos_track_chi2ndf_v_tanlambda_hh", pos_trk.getTanLambda(), pos_trk.getChi2Ndf(), weight); + _vtx_histos->Fill2DHisto("pos_track_chi2ndf_v_n2dhits_hh", pos2dHits, pos_trk.getChi2Ndf(), weight); + + _vtx_histos->Fill2DHisto("ele_track_p_v_tanlambda_hh", ele_trk.getTanLambda(), ele_trk.getP(), weight); + _vtx_histos->Fill2DHisto("pos_track_p_v_tanlambda_hh", pos_trk.getTanLambda(), pos_trk.getP(), weight); + + + //1d histos + _vtx_histos->Fill1DHisto("ele_track_clus_dt_h", ele_trk.getTrackTime() - corr_eleClusterTime, weight); + _vtx_histos->Fill1DHisto("pos_track_clus_dt_h", pos_trk.getTrackTime() - corr_posClusterTime, weight); + + passVtxPresel = true; + + selected_vtxs.push_back(vtx); + vtxSelector->clearSelector(); + } + + // std::cout << "Number of selected vtxs: " << selected_vtxs.size() << std::endl; + + _vtx_histos->Fill1DHisto("n_vertices_h",selected_vtxs.size()); + + + //not working atm + //hps_evt->addVertexCollection("selected_vtxs", selected_vtxs); + + //Make Plots for each region: loop on each region. Check if the region has the cut and apply it + //TODO Clean this up => Cuts should be implemented in each region? + //TODO Bring the preselection out of this stupid loop + + + + if (debug_) std::cout << "start regions" << std::endl; + //TODO add yields. => Quite terrible way to loop. + for (auto region : _regions ) { + + int nGoodVtx = 0; + Vertex* goodVtx = nullptr; + std::vector goodVtxs; + + float truePsum = -1; + float trueEsum = -1; + + for ( auto vtx : selected_vtxs) { + + //No cuts. + _reg_vtx_selectors[region]->getCutFlowHisto()->Fill(0.,weight); + + + Particle* ele = nullptr; + Particle* pos = nullptr; + + _ah->GetParticlesFromVtx(vtx,ele,pos); + + CalCluster eleClus = ele->getCluster(); + CalCluster posClus = pos->getCluster(); + //vtx X position + if (!_reg_vtx_selectors[region]->passCutLt("uncVtxX_lt",fabs(vtx->getX()),weight)) + continue; + + //vtx Y position + if (!_reg_vtx_selectors[region]->passCutLt("uncVtxY_lt",fabs(vtx->getY()),weight)) + continue; + + //vtx Z position + if (!_reg_vtx_selectors[region]->passCutGt("uncVtxZ_gt",vtx->getZ(),weight)) + continue; + + double ele_E = ele->getEnergy(); + double pos_E = pos->getEnergy(); + + //Compute analysis variables here. 
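Just below, both tracks receive the configured bias corrections in a loop over a name-to-value map (trackBiasCorrections_), one applyCorrection call per entry. A stand-alone sketch of that loop, assuming the correction simply shifts the named track variable by the configured amount (placeholder keys and values, not the real configuration or the real Track class):

#include <cstdio>
#include <map>
#include <string>

// Toy stand-in for Track::applyCorrection(name, value): the named variable is
// shifted by the configured bias.
struct ToyTrack {
    double t = 0.0;    // track time
    double z0 = 0.0;   // vertical impact parameter
    void applyCorrection(const std::string& var, double bias) {
        if (var == "track_time") t += bias;
        else if (var == "track_z0") z0 += bias;
    }
};

int main() {
    // Placeholder biases; the real values come from a JSON corrections file.
    std::map<std::string, double> trackBiasCorrections = {
        {"track_time", -1.0}, {"track_z0", 0.02}};
    ToyTrack ele, pos;
    for (const auto& pair : trackBiasCorrections) {   // same loop shape as in the processor
        ele.applyCorrection(pair.first, pair.second);
        pos.applyCorrection(pair.first, pair.second);
    }
    std::printf("ele: t=%.2f z0=%.3f  pos: t=%.2f z0=%.3f\n", ele.t, ele.z0, pos.t, pos.z0);
    return 0;
}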
+ + Track ele_trk = ele->getTrack(); + Track pos_trk = pos->getTrack(); + + //Apply Track Bias Corrections + for (const auto& pair : trackBiasCorrections_){ + ele_trk.applyCorrection(pair.first, pair.second); + pos_trk.applyCorrection(pair.first, pair.second); + } + + /* + //Beam Position Corrections + ele_trk.applyCorrection("z0", beamPosCorrections_.at(1)); + pos_trk.applyCorrection("z0", beamPosCorrections_.at(1)); + //Track Time Corrections + ele_trk.applyCorrection("track_time",eleTrackTimeBias_); + pos_trk.applyCorrection("track_time", posTrackTimeBias_); + */ + if (biasingTool_) { + + //Correct the wrong Bfield first + if (bFieldScaleFactor_ > 0) { + biasingTool_->updateWithBiasP(ele_trk,bFieldScaleFactor_); + biasingTool_->updateWithBiasP(pos_trk,bFieldScaleFactor_); + } + + biasingTool_->updateWithBiasP(ele_trk); + biasingTool_->updateWithBiasP(pos_trk); + } + + double invm_smear = 1.; + //std::cout << "[Region loop Vtxs] ele track p before smearing: " << ele_trk.getP() << std::endl; + if (smearingTool_) { + double unsmeared_prod = ele_trk.getP()*pos_trk.getP(); + double ele_smf = smearingTool_->updateWithSmearP(ele_trk); + double pos_smf = smearingTool_->updateWithSmearP(pos_trk); + double smeared_prod = ele_trk.getP()*pos_trk.getP(); + invm_smear = sqrt(smeared_prod/unsmeared_prod); + smearingTool_->updateVertexWithSmearP(vtx, ele_smf, pos_smf); + } + //std::cout << "[Region loop vtxs] ele track p after smearing: " << ele_trk.getP() << std::endl; + + //Add the momenta to the tracks + //ele_trk.setMomentum(ele->getMomentum()[0],ele->getMomentum()[1],ele->getMomentum()[2]); + //pos_trk.setMomentum(pos->getMomentum()[0],pos->getMomentum()[1],pos->getMomentum()[2]); + TVector3 recEleP(ele->getMomentum()[0],ele->getMomentum()[1],ele->getMomentum()[2]); + TLorentzVector p_ele; + p_ele.SetPxPyPzE(ele_trk.getMomentum()[0],ele_trk.getMomentum()[1],ele_trk.getMomentum()[2], ele_E); + TLorentzVector p_pos; + p_pos.SetPxPyPzE(pos_trk.getMomentum()[0],pos_trk.getMomentum()[1],pos_trk.getMomentum()[2], pos_E); + + //Get the layers hit on each track + std::vector ele_hit_layers = ele_trk.getHitLayers(); + int ele_Si0 = 0; + int ele_Si1 = 0; + int ele_lastlayer = 0; + for(int i=0; i pos_hit_layers = pos_trk.getHitLayers(); + int pos_Si0 = 0; + int pos_Si1 = 0; + int pos_lastlayer = 0; + for(int i=0; iInnermostLayerCheck(&ele_trk, foundL1ele, foundL2ele); + + + if (debug_) { + std::cout<<"Check on pos_Track"<InnermostLayerCheck(&pos_trk, foundL1pos, foundL2pos); + + if (debug_) { + std::cout<<"Check on pos_Track"<passCutEq("Pair1_eq",(int)evth_->isPair1Trigger(),weight)) + break; + } + //Ele Track Time + if (!_reg_vtx_selectors[region]->passCutLt("eleTrkTime_lt",fabs(ele_trk.getTrackTime()),weight)) + continue; + + //Pos Track Time + if (!_reg_vtx_selectors[region]->passCutLt("posTrkTime_lt",fabs(pos_trk.getTrackTime()),weight)) + continue; + + //Ele Track-cluster match + if (!_reg_vtx_selectors[region]->passCutLt("eleTrkCluMatch_lt",ele->getGoodnessOfPID(),weight)) + continue; + + //Pos Track-cluster match + if (!_reg_vtx_selectors[region]->passCutLt("posTrkCluMatch_lt",pos->getGoodnessOfPID(),weight)) + continue; + + //Require Positron Cluster exists + if (!_reg_vtx_selectors[region]->passCutGt("posClusE_gt",posClus.getEnergy(),weight)) + continue; + + //Require Positron Cluster does NOT exists + if (!_reg_vtx_selectors[region]->passCutLt("posClusE_lt",posClus.getEnergy(),weight)) + continue; + + double corr_eleClusterTime = ele->getCluster().getTime() - timeOffset_; + double 
corr_posClusterTime = pos->getCluster().getTime() - timeOffset_; + + double botClusTime = 0.0; + if(ele->getCluster().getPosition().at(1) < 0.0) botClusTime = ele->getCluster().getTime(); + else botClusTime = pos->getCluster().getTime(); + + //Bottom Cluster Time + if (!_reg_vtx_selectors[region]->passCutLt("botCluTime_lt", botClusTime, weight)) + continue; + + if (!_reg_vtx_selectors[region]->passCutGt("botCluTime_gt", botClusTime, weight)) + continue; + + //Ele Pos Cluster Time Difference + if (!_reg_vtx_selectors[region]->passCutLt("eleposCluTimeDiff_lt",fabs(corr_eleClusterTime - corr_posClusterTime),weight)) + continue; + + //Ele Track-Cluster Time Difference + if (!_reg_vtx_selectors[region]->passCutLt("eleTrkCluTimeDiff_lt",fabs(ele_trk.getTrackTime() - corr_eleClusterTime),weight)) + continue; + + //Pos Track-Cluster Time Difference + if (!_reg_vtx_selectors[region]->passCutLt("posTrkCluTimeDiff_lt",fabs(pos_trk.getTrackTime() - corr_posClusterTime),weight)) + continue; + + TVector3 ele_mom; + ele_mom.SetX(ele_trk.getMomentum()[0]); + ele_mom.SetY(ele_trk.getMomentum()[1]); + ele_mom.SetZ(ele_trk.getMomentum()[2]); + + + TVector3 pos_mom; + pos_mom.SetX(pos_trk.getMomentum()[0]); + pos_mom.SetY(pos_trk.getMomentum()[1]); + pos_mom.SetZ(pos_trk.getMomentum()[2]); + + //Ele Track Quality - Chi2 + if (!_reg_vtx_selectors[region]->passCutLt("eleTrkChi2_lt",ele_trk.getChi2(),weight)) + continue; - //Pos Track Quality - Chi2 - if (!_reg_vtx_selectors[region]->passCutLt("posTrkChi2_lt",pos_trk.getChi2(),weight)) - continue; + //Pos Track Quality - Chi2 + if (!_reg_vtx_selectors[region]->passCutLt("posTrkChi2_lt",pos_trk.getChi2(),weight)) + continue; - //Ele Track Quality - Chi2Ndf - if (!_reg_vtx_selectors[region]->passCutLt("eleTrkChi2Ndf_lt",ele_trk.getChi2Ndf(),weight)) - continue; - - //Pos Track Quality - Chi2Ndf - if (!_reg_vtx_selectors[region]->passCutLt("posTrkChi2Ndf_lt",pos_trk.getChi2Ndf(),weight)) - continue; - - //Beam Electron cut - if (!_reg_vtx_selectors[region]->passCutLt("eleMom_lt",ele_mom.Mag(),weight)) - continue; - - //Ele min momentum cut - if (!_reg_vtx_selectors[region]->passCutGt("eleMom_gt",ele_mom.Mag(),weight)) - continue; - - //Pos min momentum cut - if (!_reg_vtx_selectors[region]->passCutGt("posMom_gt",pos_mom.Mag(),weight)) - continue; - - //Ele nHits - int ele2dHits = ele_trk.getTrackerHitCount(); - if (!ele_trk.isKalmanTrack()) - ele2dHits*=2; - - if (!_reg_vtx_selectors[region]->passCutGt("eleN2Dhits_gt",ele2dHits,weight)) { - continue; - } - - //Pos nHits - int pos2dHits = pos_trk.getTrackerHitCount(); - if (!pos_trk.isKalmanTrack()) - pos2dHits*=2; + //Ele Track Quality - Chi2Ndf + if (!_reg_vtx_selectors[region]->passCutLt("eleTrkChi2Ndf_lt",ele_trk.getChi2Ndf(),weight)) + continue; + + //Pos Track Quality - Chi2Ndf + if (!_reg_vtx_selectors[region]->passCutLt("posTrkChi2Ndf_lt",pos_trk.getChi2Ndf(),weight)) + continue; + + //Beam Electron cut + if (!_reg_vtx_selectors[region]->passCutLt("eleMom_lt",ele_mom.Mag(),weight)) + continue; + + //Ele min momentum cut + if (!_reg_vtx_selectors[region]->passCutGt("eleMom_gt",ele_mom.Mag(),weight)) + continue; + + //Pos min momentum cut + if (!_reg_vtx_selectors[region]->passCutGt("posMom_gt",pos_mom.Mag(),weight)) + continue; + + //Ele nHits + int ele2dHits = ele_trk.getTrackerHitCount(); + if (!ele_trk.isKalmanTrack()) + ele2dHits*=2; + + if (!_reg_vtx_selectors[region]->passCutGt("eleN2Dhits_gt",ele2dHits,weight)) { + continue; + } + + //Pos nHits + int pos2dHits = pos_trk.getTrackerHitCount(); + if 
(!pos_trk.isKalmanTrack()) + pos2dHits*=2; - if (!_reg_vtx_selectors[region]->passCutGt("posN2Dhits_gt",pos2dHits,weight)) { - continue; - } - - //Less than 4 shared hits for ele/pos track - if (!_reg_vtx_selectors[region]->passCutLt("eleNshared_lt",ele_trk.getNShared(),weight)) { - continue; - } - - if (!_reg_vtx_selectors[region]->passCutLt("posNshared_lt",pos_trk.getNShared(),weight)) { - continue; - } - - //Vertex Quality - if (!_reg_vtx_selectors[region]->passCutLt("chi2unc_lt",vtx->getChi2(),weight)) - continue; - - //Max vtx momentum - if (!_reg_vtx_selectors[region]->passCutLt("maxVtxMom_lt",(ele_mom+pos_mom).Mag(),weight)) - continue; - - //Min vtx momentum - if (!_reg_vtx_selectors[region]->passCutGt("minVtxMom_gt",(ele_mom+pos_mom).Mag(),weight)) - continue; - - //END PRESELECTION CUTS - - //L1 requirement - if (!_reg_vtx_selectors[region]->passCutEq("L1Requirement_eq",(int)(foundL1ele&&foundL1pos),weight)) - continue; - - //L2 requirement - if (!_reg_vtx_selectors[region]->passCutEq("L2Requirement_eq",(int)(foundL2ele&&foundL2pos),weight)) - continue; - - //L1 requirement for positron - if (!_reg_vtx_selectors[region]->passCutEq("L1PosReq_eq",(int)(foundL1pos),weight)) - continue; - - //ESum low cut - if (!_reg_vtx_selectors[region]->passCutLt("eSum_lt",(ele_E+pos_E),weight)) - continue; - - //ESum high cut - if (!_reg_vtx_selectors[region]->passCutGt("eSum_gt",(ele_E+pos_E),weight)) - continue; - - //PSum low cut - if (!_reg_vtx_selectors[region]->passCutLt("pSum_lt",(p_ele.P()+p_pos.P()),weight)) - continue; - - //PSum high cut - if (!_reg_vtx_selectors[region]->passCutGt("pSum_gt",(p_ele.P()+p_pos.P()),weight)) - continue; - - //Require Electron Cluster exists - if (!_reg_vtx_selectors[region]->passCutGt("eleClusE_gt",eleClus.getEnergy(),weight)) - continue; - - - //Require Electron Cluster does NOT exists - if (!_reg_vtx_selectors[region]->passCutLt("eleClusE_lt",eleClus.getEnergy(),weight)) - continue; - - //No shared hits requirement - if (!_reg_vtx_selectors[region]->passCutEq("ele_sharedL0_eq",(int)ele_trk.getSharedLy0(),weight)) - continue; - if (!_reg_vtx_selectors[region]->passCutEq("pos_sharedL0_eq",(int)pos_trk.getSharedLy0(),weight)) - continue; - if (!_reg_vtx_selectors[region]->passCutEq("ele_sharedL1_eq",(int)ele_trk.getSharedLy1(),weight)) - continue; - if (!_reg_vtx_selectors[region]->passCutEq("pos_sharedL1_eq",(int)pos_trk.getSharedLy1(),weight)) - continue; - - //Min vtx Y pos - if (!_reg_vtx_selectors[region]->passCutGt("VtxYPos_gt", vtx->getY(), weight)) - continue; - - //Max vtx Y pos - if (!_reg_vtx_selectors[region]->passCutLt("VtxYPos_lt", vtx->getY(), weight)) - continue; - - //Tracking Volume for positron - if (!_reg_vtx_selectors[region]->passCutGt("volPos_top", p_pos.Py(), weight)) - continue; - - if (!_reg_vtx_selectors[region]->passCutLt("volPos_bot", p_pos.Py(), weight)) - continue; - - if (!_reg_vtx_selectors[region]->passCutLt("deltaZ_lt", std::abs((ele_trk.getZ0()/ele_trk.getTanLambda()) - (pos_trk.getZ0()/pos_trk.getTanLambda())), weight)) - continue; - - //If this is MC check if MCParticle matched to the electron track is from rad or recoil - if(!isData_) - { - //Fill MC plots after all selections - _reg_mc_vtx_histos[region]->FillMCParticles(mcParts_, analysis_); - - //Count the number of hits per part on the ele track - std::map nHits4part; - for(int i =0; i < ele_trk.getMcpHits().size(); i++) - { - int partID = ele_trk.getMcpHits().at(i).second; - if ( nHits4part.find(partID) == nHits4part.end() ) - { - // not found - 
nHits4part[partID] = 1; - } - else - { - // found - nHits4part[partID]++; - } - } - - //Determine the MC part with the most hits on the track - int maxNHits = 0; - int maxID = 0; - for (std::map::iterator it=nHits4part.begin(); it!=nHits4part.end(); ++it) - { - if(it->second > maxNHits) - { - maxNHits = it->second; - maxID = it->first; - } - } - - //Find the correct mc part and grab mother id - int isRadEle = -999; - int isRecEle = -999; - - - trueEleP.SetXYZ(-999,-999,-999); - truePosP.SetXYZ(-999,-999,-999); - if (mcParts_) { - float trueEleE = -1; - float truePosE = -1; - for(int i = 0; i < mcParts_->size(); i++) - { - int momPDG = mcParts_->at(i)->getMomPDG(); - if(mcParts_->at(i)->getPDG() == 11 && momPDG == isRadPDG_) - { - std::vector lP = mcParts_->at(i)->getMomentum(); - trueEleP.SetXYZ(lP[0],lP[1],lP[2]); - trueEleE = mcParts_->at(i)->getEnergy(); - - } - if(mcParts_->at(i)->getPDG() == -11 && momPDG == isRadPDG_) - { - std::vector lP = mcParts_->at(i)->getMomentum(); - truePosP.SetXYZ(lP[0],lP[1],lP[2]); - truePosE = mcParts_->at(i)->getEnergy(); - - } - if(trueEleP.X() != -999 && truePosP.X() != -999){ - truePsum = trueEleP.Mag() + trueEleP.Mag(); - trueEsum = trueEleE + truePosE; - } - - if(mcParts_->at(i)->getID() != maxID) continue; - //Default isRadPDG = 622 - if(momPDG == isRadPDG_) isRadEle = 1; - if(momPDG == 623) isRecEle = 1; - } - } - double momRatio = recEleP.Mag() / trueEleP.Mag(); - double momAngle = trueEleP.Angle(recEleP) * TMath::RadToDeg(); - if (!_reg_vtx_selectors[region]->passCutLt("momRatio_lt", momRatio, weight)) continue; - if (!_reg_vtx_selectors[region]->passCutGt("momRatio_gt", momRatio, weight)) continue; - if (!_reg_vtx_selectors[region]->passCutLt("momAngle_lt", momAngle, weight)) continue; - - if (!_reg_vtx_selectors[region]->passCutEq("isRadEle_eq", isRadEle, weight)) continue; - if (!_reg_vtx_selectors[region]->passCutEq("isNotRadEle_eq", isRadEle, weight)) continue; - if (!_reg_vtx_selectors[region]->passCutEq("isRecEle_eq", isRecEle, weight)) continue; - } - - goodVtx = vtx; - nGoodVtx++; - goodVtxs.push_back(vtx); - } // selected vertices - - //N selected vertices - this is quite a silly cut to make at the end. But okay. that's how we decided atm. - if (!_reg_vtx_selectors[region]->passCutEq("nVtxs_eq", nGoodVtx, weight)) - continue; - //Move to after N vertices cut (was filled before) - _reg_vtx_histos[region]->Fill1DHisto("n_vertices_h", nGoodVtx, weight); - - //Loop over all selected vertices in the region - for(std::vector::iterator it = goodVtxs.begin(); it != goodVtxs.end(); it++){ - - Vertex* vtx = *it; - - Particle* ele = nullptr; - Particle* pos = nullptr; - - if (!vtx || !_ah->GetParticlesFromVtx(vtx,ele,pos)) - continue; - - CalCluster eleClus = ele->getCluster(); - CalCluster posClus = pos->getCluster(); - - double corr_eleClusterTime = ele->getCluster().getTime() - timeOffset_; - double corr_posClusterTime = pos->getCluster().getTime() - timeOffset_; - - double ele_E = ele->getEnergy(); - double pos_E = pos->getEnergy(); - - //Compute analysis variables here. 
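In the block that follows, MC track momenta are smeared and the vertex invariant mass is rescaled by invm_smear = sqrt(smeared_prod / unsmeared_prod), the geometric mean of the two per-track smear factors. For relativistic e+e- pairs m^2 is approximately 2 p_e p_p (1 - cos theta), so at fixed opening angle the mass scales with sqrt(p_e p_p), which is what this factor propagates to the stored mass without refitting the vertex. A small numerical sketch (smear factors are made up):

#include <cmath>
#include <cstdio>

int main() {
    // Unsmeared track momenta (GeV) and made-up per-track smear factors.
    double pEle = 1.20, pPos = 0.70;
    const double eleSmf = 1.03, posSmf = 0.98;

    const double unsmearedProd = pEle * pPos;
    pEle *= eleSmf;                    // smear each track momentum
    pPos *= posSmf;
    const double smearedProd = pEle * pPos;

    // Same rescale factor as invm_smear above.
    const double invmSmear = std::sqrt(smearedProd / unsmearedProd);

    const double mass = 0.085;         // GeV, illustrative vertex invariant mass
    std::printf("invm_smear = %.4f, smeared mass = %.4f GeV\n", invmSmear, invmSmear * mass);
    return 0;
}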
- Track ele_trk = ele->getTrack(); - Track pos_trk = pos->getTrack(); - //Get the shared info - TODO change and improve - // - //Apply Track Bias Corrections - for (const auto& pair : trackBiasCorrections_){ - ele_trk.applyCorrection(pair.first, pair.second); - pos_trk.applyCorrection(pair.first, pair.second); - } - /* - //Track Time Corrections - ele_trk.applyCorrection("track_time",eleTrackTimeBias_); - pos_trk.applyCorrection("track_time", posTrackTimeBias_); - ele_trk.applyCorrection("z0", beamPosCorrections_.at(1)); - pos_trk.applyCorrection("z0", beamPosCorrections_.at(1)); - */ - - // Track Momentum bias - - if (biasingTool_) { - - // Correct for wrong track momentum - Bug Fix - // In case there was mis-configuration during reco/hpstr-ntuple step, correct - // the momentum magnitude here using the right bField for the data taking year - - if (bFieldScaleFactor_ > 0) { - biasingTool_->updateWithBiasP(ele_trk,bFieldScaleFactor_); - biasingTool_->updateWithBiasP(pos_trk,bFieldScaleFactor_); - } - - - biasingTool_->updateWithBiasP(ele_trk); - biasingTool_->updateWithBiasP(pos_trk); - } - - - double invm_smear = 1.; - //std::cout << "[Good Vtxs] ele track p before smearing: " << ele_trk.getP() << std::endl; - if (smearingTool_) { - double unsmeared_prod = ele_trk.getP()*pos_trk.getP(); - double ele_smf = smearingTool_->updateWithSmearP(ele_trk); - double pos_smf = smearingTool_->updateWithSmearP(pos_trk); - double smeared_prod = ele_trk.getP()*pos_trk.getP(); - invm_smear = sqrt(smeared_prod/unsmeared_prod); - smearingTool_->updateVertexWithSmearP(vtx, ele_smf, pos_smf); - } - //std::cout << "[Good Vtxs] ele track p after smearing: " << ele_trk.getP() << std::endl; - - //Get the layers hit on each track - std::vector ele_hit_layers = ele_trk.getHitLayers(); - int ele_Si0 = 0; - int ele_Si1 = 0; - int ele_lastlayer = 0; - for(int i=0; i pos_hit_layers = pos_trk.getHitLayers(); - int pos_Si0 = 0; - int pos_Si1 = 0; - int pos_lastlayer = 0; - for(int i=0; i vtx_cov = vtx->getCovariance(); - float cxx = vtx_cov.at(0); - float cyx = vtx_cov.at(1); - float cyy = vtx_cov.at(2); - float czx = vtx_cov.at(3); - float czy = vtx_cov.at(4); - float czz = vtx_cov.at(5); - - - //MC Truth hits in first 4 sensors - int L1L2hitCode = 0; //hit code '1111' means truth ax+ster hits in L1_ele, L1_pos, L2_ele, L2_pos - int L1hitCode = 0; //hit code '1111' means truth in L1_ele_ax, L1_ele_ster, L1_pos_ax, L1_pos_ster - int L2hitCode = 0; // hit code '1111' means truth in L2_ele_ax, L2_ele_ster, L2_pos_ax, L2_pos_ster - if(!isData_){ - //Get hit codes. Only sure this works for 2016 KF as is. 
- utils::get2016KFMCTruthHitCodes(&ele_trk, &pos_trk, L1L2hitCode, L1hitCode, L2hitCode); - //L1L2 truth hit selection - if (!_reg_vtx_selectors[region]->passCutLt("hitCode_lt",((double)L1L2hitCode)-0.5, weight)) continue; - if (!_reg_vtx_selectors[region]->passCutGt("hitCode_gt",((double)L1L2hitCode)+0.5, weight)) continue; - //Fil hitcodes - _reg_vtx_histos[region]->Fill1DHisto("hitCode_h", L1L2hitCode,weight); - _reg_vtx_histos[region]->Fill1DHisto("L1hitCode_h", L1hitCode,weight); - _reg_vtx_histos[region]->Fill1DHisto("L2hitCode_h", L2hitCode,weight); - } - - //track isolations - //Only calculate isolations if both track L1 and L2 hits exist - bool hasL1ele = false; - bool hasL2ele = false; - _ah->InnermostLayerCheck(&ele_trk, hasL1ele, hasL2ele); - - bool hasL1pos = false; - bool hasL2pos = false; - _ah->InnermostLayerCheck(&pos_trk, hasL1pos, hasL2pos); - - TVector3 ele_mom; - //ele_mom.SetX(ele->getMomentum()[0]); - //ele_mom.SetY(ele->getMomentum()[1]); - //ele_mom.SetZ(ele->getMomentum()[2]); - ele_mom.SetX(ele_trk.getMomentum()[0]); - ele_mom.SetY(ele_trk.getMomentum()[1]); - ele_mom.SetZ(ele_trk.getMomentum()[2]); - - - TVector3 pos_mom; - //pos_mom.SetX(pos->getMomentum()[0]); - //pos_mom.SetY(pos->getMomentum()[1]); - //pos_mom.SetZ(pos->getMomentum()[2]); - pos_mom.SetX(pos_trk.getMomentum()[0]); - pos_mom.SetY(pos_trk.getMomentum()[1]); - pos_mom.SetZ(pos_trk.getMomentum()[2]); - - double ele_pos_dt = corr_eleClusterTime - corr_posClusterTime; - double psum = ele_mom.Mag()+pos_mom.Mag(); - - //Ele nHits - int ele2dHits = ele_trk.getTrackerHitCount(); - if (!ele_trk.isKalmanTrack()) - ele2dHits*=2; - - //pos nHits - int pos2dHits = pos_trk.getTrackerHitCount(); - - if(ts_ != nullptr) - { - _reg_vtx_histos[region]->Fill2DHisto("trig_count_hh", - ((int)ts_->prescaled.Single_3_Top)+((int)ts_->prescaled.Single_3_Bot), - ((int)ts_->prescaled.Single_2_Top)+((int)ts_->prescaled.Single_2_Bot)); - } - _reg_vtx_histos[region]->Fill1DHisto("n_vtx_h", vtxs_->size()); - - //Add the momenta to the tracks - //ele_trk.setMomentum(ele->getMomentum()[0],ele->getMomentum()[1],ele->getMomentum()[2]); - //pos_trk.setMomentum(pos->getMomentum()[0],pos->getMomentum()[1],pos->getMomentum()[2]); - TVector3 recEleP(ele->getMomentum()[0],ele->getMomentum()[1],ele->getMomentum()[2]); - TLorentzVector p_ele; - p_ele.SetPxPyPzE(ele_trk.getMomentum()[0],ele_trk.getMomentum()[1],ele_trk.getMomentum()[2], ele_E); - TLorentzVector p_pos; - p_pos.SetPxPyPzE(pos_trk.getMomentum()[0],pos_trk.getMomentum()[1],pos_trk.getMomentum()[2], pos_E); - - - _reg_vtx_histos[region]->Fill2DHistograms(vtx,weight); - _reg_vtx_histos[region]->Fill1DVertex(vtx, - ele, - pos, - &ele_trk, - &pos_trk, - weight); - - _reg_vtx_histos[region]->Fill1DHisto("ele_pos_clusTimeDiff_h", (corr_eleClusterTime - corr_posClusterTime), weight); - _reg_vtx_histos[region]->Fill1DHisto("ele_track_n2dhits_h", ele2dHits, weight); - _reg_vtx_histos[region]->Fill1DHisto("pos_track_n2dhits_h", pos2dHits, weight); - _reg_vtx_histos[region]->Fill1DHisto("vtx_Psum_h", p_ele.P()+p_pos.P(), weight); - _reg_vtx_histos[region]->Fill1DHisto("vtx_Esum_h", eleClus.getEnergy()+posClus.getEnergy(), weight); - _reg_vtx_histos[region]->Fill1DHisto("vtx_smear_InvM_h", invm_smear*(vtx->getInvMass()), weight); - _reg_vtx_histos[region]->Fill2DHisto("ele_vtxZ_iso_hh", TMath::Min(ele_trk.getIsolation(0), ele_trk.getIsolation(1)), vtx->getZ(), weight); - _reg_vtx_histos[region]->Fill2DHisto("pos_vtxZ_iso_hh", TMath::Min(pos_trk.getIsolation(0), pos_trk.getIsolation(1)), 
vtx->getZ(), weight); - _reg_vtx_histos[region]->Fill2DTrack(&ele_trk,weight,"ele_"); - _reg_vtx_histos[region]->Fill2DTrack(&pos_trk,weight,"pos_"); - _reg_vtx_histos[region]->Fill1DHisto("mcMass622_h",apMass); - _reg_vtx_histos[region]->Fill1DHisto("mcZ622_h",apZ); - _reg_vtx_histos[region]->Fill1DHisto("mcMass625_h",vdMass); - _reg_vtx_histos[region]->Fill1DHisto("mcZ625_h",vdZ); - - - //Just for the selected vertex - if(!isData_) - { - _reg_vtx_histos[region]->Fill2DHisto("vtx_Esum_vs_true_Esum_hh",eleClus.getEnergy()+posClus.getEnergy(), trueEsum, weight); - _reg_vtx_histos[region]->Fill2DHisto("vtx_Psum_vs_true_Psum_hh",p_ele.P()+p_pos.P(), truePsum, weight); - _reg_vtx_histos[region]->Fill1DHisto("true_vtx_psum_h",truePsum,weight); - } - - double reconz = vtx->getZ(); - double ele_trk_z0 = ele_trk.getZ0(); - double ele_trk_z0err = ele_trk.getZ0Err(); - double pos_trk_z0 = pos_trk.getZ0(); - double pos_trk_z0err = pos_trk.getZ0Err(); - - //DeltaZ - double deltaZ = std::abs( (ele_trk_z0/ele_trk.getTanLambda()) - (pos_trk_z0/pos_trk.getTanLambda()) ); - - //Project vertex to target - double vtx_proj_x = -999.9; - double vtx_proj_y = -999.9; - double vtx_proj_x_sig = -999.9; - double vtx_proj_y_sig = -999.9; - double vtx_proj_sig = -999.9; - if(!v0ProjectionFitsCfg_.empty()) - vtx_proj_sig = utils::v0_projection_to_target_significance(v0proj_fits_, evth_->getRunNumber(), - vtx_proj_x, vtx_proj_y, vtx_proj_x_sig, vtx_proj_y_sig, vtx->getX(), vtx->getY(), - reconz, vtx->getP().X(), vtx->getP().Y(), vtx->getP().Z()); - - _reg_vtx_histos[region]->Fill2DHisto("unc_vtx_x_v_unc_vtx_y_hh", vtx->getX(), vtx->getY()); - _reg_vtx_histos[region]->Fill2DHisto("unc_vtx_proj_x_v_unc_vtx_proj_y_hh", vtx_proj_x, vtx_proj_y); - _reg_vtx_histos[region]->Fill2DHisto("unc_vtx_proj_x_y_significance_hh", vtx_proj_x_sig, vtx_proj_y_sig); - _reg_vtx_histos[region]->Fill2DHisto("recon_z_v_vtx_proj_significance_hh", vtx_proj_sig, reconz); - _reg_vtx_histos[region]->Fill2DHisto("vtx_track_reconz_v_Z0err_hh", ele_trk_z0err, reconz); - _reg_vtx_histos[region]->Fill2DHisto("vtx_track_reconz_v_Z0err_hh", pos_trk_z0err, reconz); - _reg_vtx_histos[region]->Fill2DHisto("recon_z_v_z0_hh", ele_trk_z0, reconz); - _reg_vtx_histos[region]->Fill2DHisto("recon_z_v_z0_hh", pos_trk_z0, reconz); - _reg_vtx_histos[region]->Fill2DHisto("vtx_track_recon_z_v_ABSdz0tanlambda_hh", std::abs((ele_trk_z0/ele_trk.getTanLambda()) - (pos_trk_z0/pos_trk.getTanLambda())), reconz); - _reg_vtx_histos[region]->Fill2DHisto("vtx_track_recon_z_v_dz0tanlambda_hh", ((ele_trk_z0/ele_trk.getTanLambda()) - (pos_trk_z0/pos_trk.getTanLambda())), reconz); - - _reg_vtx_histos[region]->Fill2DHisto("recon_z_v_cxx_hh", cxx, reconz); - _reg_vtx_histos[region]->Fill2DHisto("recon_z_v_cyy_hh", cyy, reconz); - _reg_vtx_histos[region]->Fill2DHisto("recon_z_v_czz_hh", czz, reconz); - _reg_vtx_histos[region]->Fill2DHisto("recon_z_v_cyx_hh", cyx, reconz); - _reg_vtx_histos[region]->Fill2DHisto("recon_z_v_czx_hh", czx, reconz); - _reg_vtx_histos[region]->Fill2DHisto("recon_z_v_czy_hh", czy, reconz); - _reg_vtx_histos[region]->Fill1DHisto("cxx_h", cxx); - _reg_vtx_histos[region]->Fill1DHisto("cyy_h", cyy); - _reg_vtx_histos[region]->Fill1DHisto("czz_h", czz); - _reg_vtx_histos[region]->Fill1DHisto("cyx_h", cyx); - _reg_vtx_histos[region]->Fill1DHisto("czx_h", czx); - _reg_vtx_histos[region]->Fill1DHisto("czy_h", czy); - _reg_vtx_histos[region]->Fill2DHisto("vtx_track_recon_z_v_z0tanlambda_hh", ele_trk_z0/ele_trk.getTanLambda(), reconz); - 
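The cxx through czz values filled just above come from the packed vertex covariance read out earlier in this loop, in lower-triangular order (xx, yx, yy, zx, zy, zz). A sketch of unpacking that 6-element vector into a symmetric 3x3 matrix, assuming exactly that ordering (the numbers are invented):

#include <cstdio>
#include <vector>

int main() {
    // Packed lower-triangular vertex covariance in the order used above:
    // xx, yx, yy, zx, zy, zz (invented values).
    const std::vector<float> vtx_cov = {0.04f, 0.001f, 0.09f, 0.002f, 0.003f, 1.5f};
    const float cxx = vtx_cov.at(0), cyx = vtx_cov.at(1), cyy = vtx_cov.at(2);
    const float czx = vtx_cov.at(3), czy = vtx_cov.at(4), czz = vtx_cov.at(5);

    // Symmetric 3x3 form of the same matrix.
    const float cov[3][3] = {{cxx, cyx, czx},
                             {cyx, cyy, czy},
                             {czx, czy, czz}};
    for (const auto& row : cov)
        std::printf("%8.4f %8.4f %8.4f\n", row[0], row[1], row[2]);
    return 0;
}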
_reg_vtx_histos[region]->Fill2DHisto("vtx_track_recon_z_v_z0tanlambda_hh", pos_trk_z0/pos_trk.getTanLambda(), reconz); - _reg_vtx_histos[region]->Fill2DHisto("ele_clusT_v_ele_trackT_hh", ele_trk.getTrackTime(), corr_eleClusterTime, weight); - _reg_vtx_histos[region]->Fill2DHisto("pos_clusT_v_pos_trackT_hh", pos_trk.getTrackTime(), corr_posClusterTime, weight); - _reg_vtx_histos[region]->Fill2DHisto("ele_track_time_v_P_hh", ele_trk.getP(), ele_trk.getTrackTime(), weight); - _reg_vtx_histos[region]->Fill2DHisto("pos_track_time_v_P_hh", pos_trk.getP(), pos_trk.getTrackTime(), weight); - _reg_vtx_histos[region]->Fill2DHisto("ele_pos_clusTimeDiff_v_pSum_hh",ele_mom.Mag()+pos_mom.Mag(), ele_pos_dt, weight); - _reg_vtx_histos[region]->Fill2DHisto("ele_cluster_energy_v_track_p_hh",ele_trk.getP(), eleClus.getEnergy(), weight); - _reg_vtx_histos[region]->Fill2DHisto("pos_cluster_energy_v_track_p_hh",pos_trk.getP(), posClus.getEnergy(), weight); - _reg_vtx_histos[region]->Fill2DHisto("ele_track_cluster_dt_v_EoverP_hh",eleClus.getEnergy()/ele_trk.getP(), ele_trk.getTrackTime() - corr_eleClusterTime, weight); - _reg_vtx_histos[region]->Fill2DHisto("pos_track_cluster_dt_v_EoverP_hh",posClus.getEnergy()/pos_trk.getP(), pos_trk.getTrackTime() - corr_posClusterTime, weight); - _reg_vtx_histos[region]->Fill2DHisto("ele_track_clus_dt_v_p_hh",ele_trk.getP(), ele_trk.getTrackTime() - corr_eleClusterTime, weight); - _reg_vtx_histos[region]->Fill2DHisto("pos_track_clus_dt_v_p_hh",pos_trk.getP(), pos_trk.getTrackTime() - corr_posClusterTime, weight); - _reg_vtx_histos[region]->Fill2DHisto("ele_z0_vs_pos_z0_hh",ele_trk.getZ0(), pos_trk.getZ0(), weight); - - //chi2 2d plots - _reg_vtx_histos[region]->Fill2DHisto("ele_track_chi2ndf_v_time_hh", ele_trk.getTrackTime(), ele_trk.getChi2Ndf(), weight); - _reg_vtx_histos[region]->Fill2DHisto("ele_track_chi2ndf_v_p_hh", ele_trk.getP(), ele_trk.getChi2Ndf(), weight); - _reg_vtx_histos[region]->Fill2DHisto("ele_track_chi2ndf_v_tanlambda_hh", ele_trk.getTanLambda(), ele_trk.getChi2Ndf(), weight); - _reg_vtx_histos[region]->Fill2DHisto("ele_track_chi2ndf_v_n2dhits_hh", ele2dHits, ele_trk.getChi2Ndf(), weight); - - _reg_vtx_histos[region]->Fill2DHisto("pos_track_chi2ndf_v_time_hh", pos_trk.getTrackTime(), pos_trk.getChi2Ndf(), weight); - _reg_vtx_histos[region]->Fill2DHisto("pos_track_chi2ndf_v_p_hh", pos_trk.getP(), pos_trk.getChi2Ndf(), weight); - _reg_vtx_histos[region]->Fill2DHisto("pos_track_chi2ndf_v_tanlambda_hh", pos_trk.getTanLambda(), pos_trk.getChi2Ndf(), weight); - _reg_vtx_histos[region]->Fill2DHisto("pos_track_chi2ndf_v_n2dhits_hh", pos2dHits, pos_trk.getChi2Ndf(), weight); - - _reg_vtx_histos[region]->Fill2DHisto("ele_track_p_v_tanlambda_hh", ele_trk.getTanLambda(), ele_trk.getP(), weight); - _reg_vtx_histos[region]->Fill2DHisto("pos_track_p_v_tanlambda_hh", pos_trk.getTanLambda(), pos_trk.getP(), weight); - - - //1d histos - _reg_vtx_histos[region]->Fill1DHisto("ele_track_clus_dt_h", ele_trk.getTrackTime() - corr_eleClusterTime, weight); - _reg_vtx_histos[region]->Fill1DHisto("pos_track_clus_dt_h", pos_trk.getTrackTime() - corr_posClusterTime, weight); - - - //TODO put this in the Vertex! 
- TVector3 vtxPosSvt; - vtxPosSvt.SetX(vtx->getX()); - vtxPosSvt.SetY(vtx->getY()); - vtxPosSvt.SetZ(vtx->getZ()); - vtxPosSvt.RotateY(-0.0305); - - //Just for the selected vertex - if (makeFlatTuple_){ - if(!isData_){ - _reg_tuples[region]->setVariableValue("ap_true_vtx_z", apZ); - _reg_tuples[region]->setVariableValue("ap_true_vtx_mass", apMass); - _reg_tuples[region]->setVariableValue("ap_true_vtx_energy", apEnergy); - _reg_tuples[region]->setVariableValue("vd_true_vtx_z", vdZ); - _reg_tuples[region]->setVariableValue("vd_true_vtx_mass", vdMass); - _reg_tuples[region]->setVariableValue("vd_true_vtx_energy", vdEnergy); - _reg_tuples[region]->setVariableValue("hitCode", float(L1L2hitCode)); - _reg_tuples[region]->setVariableValue("L1hitCode", float(L1hitCode)); - _reg_tuples[region]->setVariableValue("L2hitCode", float(L2hitCode)); - } - - _reg_tuples[region]->setVariableValue("unc_vtx_mass", vtx->getInvMass()); - _reg_tuples[region]->setVariableValue("unc_vtx_z" , vtxPosSvt.Z()); - _reg_tuples[region]->setVariableValue("unc_vtx_chi2", vtx->getChi2()); - _reg_tuples[region]->setVariableValue("unc_vtx_psum", p_ele.P()+p_pos.P()); - _reg_tuples[region]->setVariableValue("unc_vtx_px", vtx->getP().X()); - _reg_tuples[region]->setVariableValue("unc_vtx_py", vtx->getP().Y()); - _reg_tuples[region]->setVariableValue("unc_vtx_pz", vtx->getP().Z()); - _reg_tuples[region]->setVariableValue("unc_vtx_x", vtx->getX()); - _reg_tuples[region]->setVariableValue("unc_vtx_y", vtx->getY()); - _reg_tuples[region]->setVariableValue("unc_vtx_proj_x", vtx_proj_x); - _reg_tuples[region]->setVariableValue("unc_vtx_proj_y", vtx_proj_y); - _reg_tuples[region]->setVariableValue("unc_vtx_proj_x_sig", vtx_proj_x_sig); - _reg_tuples[region]->setVariableValue("unc_vtx_proj_y_sig", vtx_proj_y_sig); - _reg_tuples[region]->setVariableValue("unc_vtx_proj_sig", vtx_proj_sig); - _reg_tuples[region]->setVariableValue("unc_vtx_ele_pos_clust_dt", corr_eleClusterTime - corr_posClusterTime); - - _reg_tuples[region]->setVariableValue("unc_vtx_cxx", cxx); - _reg_tuples[region]->setVariableValue("unc_vtx_cyy", cyy); - _reg_tuples[region]->setVariableValue("unc_vtx_czz", czz); - _reg_tuples[region]->setVariableValue("unc_vtx_cyx", cyx); - _reg_tuples[region]->setVariableValue("unc_vtx_czy", czy); - _reg_tuples[region]->setVariableValue("unc_vtx_czx", czx); - _reg_tuples[region]->setVariableValue("unc_vtx_deltaZ", deltaZ); - - //track vars - _reg_tuples[region]->setVariableValue("unc_vtx_ele_track_p", ele_trk.getP()); - _reg_tuples[region]->setVariableValue("unc_vtx_ele_track_t", ele_trk.getTrackTime()); - _reg_tuples[region]->setVariableValue("unc_vtx_ele_track_d0", ele_trk.getD0()); - _reg_tuples[region]->setVariableValue("unc_vtx_ele_track_phi0", ele_trk.getPhi()); - _reg_tuples[region]->setVariableValue("unc_vtx_ele_track_omega", ele_trk.getOmega()); - _reg_tuples[region]->setVariableValue("unc_vtx_ele_track_tanLambda", ele_trk.getTanLambda()); - _reg_tuples[region]->setVariableValue("unc_vtx_ele_track_z0", ele_trk.getZ0()); - _reg_tuples[region]->setVariableValue("unc_vtx_ele_track_chi2ndf", ele_trk.getChi2Ndf()); - _reg_tuples[region]->setVariableValue("unc_vtx_ele_track_clust_dt", ele_trk.getTrackTime() - corr_eleClusterTime); - _reg_tuples[region]->setVariableValue("unc_vtx_ele_track_z0Err",ele_trk.getZ0Err()); - _reg_tuples[region]->setVariableValue("unc_vtx_ele_track_d0Err", ele_trk.getD0Err()); - _reg_tuples[region]->setVariableValue("unc_vtx_ele_track_tanLambdaErr", ele_trk.getTanLambdaErr()); - 
_reg_tuples[region]->setVariableValue("unc_vtx_ele_track_PhiErr", ele_trk.getPhiErr()); - _reg_tuples[region]->setVariableValue("unc_vtx_ele_track_OmegaErr", ele_trk.getOmegaErr()); - _reg_tuples[region]->setVariableValue("unc_vtx_ele_track_nhits",ele2dHits); - _reg_tuples[region]->setVariableValue("unc_vtx_ele_track_lastlayer",ele_lastlayer); - _reg_tuples[region]->setVariableValue("unc_vtx_ele_track_si0",ele_Si0); - _reg_tuples[region]->setVariableValue("unc_vtx_ele_track_si1",ele_Si1); - - _reg_tuples[region]->setVariableValue("unc_vtx_pos_track_p", pos_trk.getP()); - _reg_tuples[region]->setVariableValue("unc_vtx_pos_track_t", pos_trk.getTrackTime()); - _reg_tuples[region]->setVariableValue("unc_vtx_pos_track_d0", pos_trk.getD0()); - _reg_tuples[region]->setVariableValue("unc_vtx_pos_track_phi0", pos_trk.getPhi()); - _reg_tuples[region]->setVariableValue("unc_vtx_pos_track_omega", pos_trk.getOmega()); - _reg_tuples[region]->setVariableValue("unc_vtx_pos_track_tanLambda", pos_trk.getTanLambda()); - _reg_tuples[region]->setVariableValue("unc_vtx_pos_track_z0", pos_trk.getZ0()); - _reg_tuples[region]->setVariableValue("unc_vtx_pos_track_chi2ndf", pos_trk.getChi2Ndf()); - _reg_tuples[region]->setVariableValue("unc_vtx_pos_track_clust_dt", pos_trk.getTrackTime() - corr_posClusterTime); - _reg_tuples[region]->setVariableValue("unc_vtx_pos_track_z0Err",pos_trk.getZ0Err()); - _reg_tuples[region]->setVariableValue("unc_vtx_pos_track_d0Err", pos_trk.getD0Err()); - _reg_tuples[region]->setVariableValue("unc_vtx_pos_track_tanLambdaErr", pos_trk.getTanLambdaErr()); - _reg_tuples[region]->setVariableValue("unc_vtx_pos_track_PhiErr", pos_trk.getPhiErr()); - _reg_tuples[region]->setVariableValue("unc_vtx_pos_track_OmegaErr", pos_trk.getOmegaErr()); - _reg_tuples[region]->setVariableValue("unc_vtx_pos_track_nhits",pos2dHits); - _reg_tuples[region]->setVariableValue("unc_vtx_pos_track_lastlayer",pos_lastlayer); - _reg_tuples[region]->setVariableValue("unc_vtx_pos_track_si0",pos_Si0); - _reg_tuples[region]->setVariableValue("unc_vtx_pos_track_si1",pos_Si1); - - //clust vars - _reg_tuples[region]->setVariableValue("unc_vtx_ele_clust_E", eleClus.getEnergy()); - _reg_tuples[region]->setVariableValue("unc_vtx_ele_clust_x", eleClus.getPosition().at(0)); - _reg_tuples[region]->setVariableValue("unc_vtx_ele_clust_corr_t",corr_eleClusterTime); - - _reg_tuples[region]->setVariableValue("unc_vtx_pos_clust_E", posClus.getEnergy()); - _reg_tuples[region]->setVariableValue("unc_vtx_pos_clust_x", posClus.getPosition().at(0)); - _reg_tuples[region]->setVariableValue("unc_vtx_pos_clust_corr_t",corr_posClusterTime); - _reg_tuples[region]->setVariableValue("run_number", evth_->getRunNumber()); - - _reg_tuples[region]->setVariableValue("unc_vtx_ele_track_ecal_x", ele_trk.getPositionAtEcal().at(0)); - _reg_tuples[region]->setVariableValue("unc_vtx_ele_track_ecal_y", ele_trk.getPositionAtEcal().at(1)); - _reg_tuples[region]->setVariableValue("unc_vtx_ele_track_z", ele_trk.getPosition().at(2)); - _reg_tuples[region]->setVariableValue("unc_vtx_pos_track_ecal_x", pos_trk.getPositionAtEcal().at(0)); - _reg_tuples[region]->setVariableValue("unc_vtx_pos_track_ecal_y", pos_trk.getPositionAtEcal().at(1)); - _reg_tuples[region]->setVariableValue("unc_vtx_pos_track_z", pos_trk.getPosition().at(2)); - _reg_tuples[region]->setVariableValue("unc_vtx_ele_track_px", ele_trk.getMomentum().at(0)); - _reg_tuples[region]->setVariableValue("unc_vtx_ele_track_py", ele_trk.getMomentum().at(1)); - 
_reg_tuples[region]->setVariableValue("unc_vtx_ele_track_pz", ele_trk.getMomentum().at(2)); - _reg_tuples[region]->setVariableValue("unc_vtx_pos_track_px", pos_trk.getMomentum().at(0)); - _reg_tuples[region]->setVariableValue("unc_vtx_pos_track_py", pos_trk.getMomentum().at(1)); - _reg_tuples[region]->setVariableValue("unc_vtx_pos_track_pz", pos_trk.getMomentum().at(2)); - - _reg_tuples[region]->fill(); - } - } - - }// regions - - return true; + if (!_reg_vtx_selectors[region]->passCutGt("posN2Dhits_gt",pos2dHits,weight)) { + continue; + } + + //Less than 4 shared hits for ele/pos track + if (!_reg_vtx_selectors[region]->passCutLt("eleNshared_lt",ele_trk.getNShared(),weight)) { + continue; + } + + if (!_reg_vtx_selectors[region]->passCutLt("posNshared_lt",pos_trk.getNShared(),weight)) { + continue; + } + + //Vertex Quality + if (!_reg_vtx_selectors[region]->passCutLt("chi2unc_lt",vtx->getChi2(),weight)) + continue; + + //Max vtx momentum + if (!_reg_vtx_selectors[region]->passCutLt("maxVtxMom_lt",(ele_mom+pos_mom).Mag(),weight)) + continue; + + //Min vtx momentum + if (!_reg_vtx_selectors[region]->passCutGt("minVtxMom_gt",(ele_mom+pos_mom).Mag(),weight)) + continue; + + //END PRESELECTION CUTS + + //L1 requirement + if (!_reg_vtx_selectors[region]->passCutEq("L1Requirement_eq",(int)(foundL1ele&&foundL1pos),weight)) + continue; + + //L2 requirement + if (!_reg_vtx_selectors[region]->passCutEq("L2Requirement_eq",(int)(foundL2ele&&foundL2pos),weight)) + continue; + + //L1 requirement for positron + if (!_reg_vtx_selectors[region]->passCutEq("L1PosReq_eq",(int)(foundL1pos),weight)) + continue; + + //ESum low cut + if (!_reg_vtx_selectors[region]->passCutLt("eSum_lt",(ele_E+pos_E),weight)) + continue; + + //ESum high cut + if (!_reg_vtx_selectors[region]->passCutGt("eSum_gt",(ele_E+pos_E),weight)) + continue; + + //PSum low cut + if (!_reg_vtx_selectors[region]->passCutLt("pSum_lt",(p_ele.P()+p_pos.P()),weight)) + continue; + + //PSum high cut + if (!_reg_vtx_selectors[region]->passCutGt("pSum_gt",(p_ele.P()+p_pos.P()),weight)) + continue; + + //Require Electron Cluster exists + if (!_reg_vtx_selectors[region]->passCutGt("eleClusE_gt",eleClus.getEnergy(),weight)) + continue; + + + //Require Electron Cluster does NOT exists + if (!_reg_vtx_selectors[region]->passCutLt("eleClusE_lt",eleClus.getEnergy(),weight)) + continue; + + //No shared hits requirement + if (!_reg_vtx_selectors[region]->passCutEq("ele_sharedL0_eq",(int)ele_trk.getSharedLy0(),weight)) + continue; + if (!_reg_vtx_selectors[region]->passCutEq("pos_sharedL0_eq",(int)pos_trk.getSharedLy0(),weight)) + continue; + if (!_reg_vtx_selectors[region]->passCutEq("ele_sharedL1_eq",(int)ele_trk.getSharedLy1(),weight)) + continue; + if (!_reg_vtx_selectors[region]->passCutEq("pos_sharedL1_eq",(int)pos_trk.getSharedLy1(),weight)) + continue; + + //Min vtx Y pos + if (!_reg_vtx_selectors[region]->passCutGt("VtxYPos_gt", vtx->getY(), weight)) + continue; + + //Max vtx Y pos + if (!_reg_vtx_selectors[region]->passCutLt("VtxYPos_lt", vtx->getY(), weight)) + continue; + + //Tracking Volume for positron + if (!_reg_vtx_selectors[region]->passCutGt("volPos_top", p_pos.Py(), weight)) + continue; + + if (!_reg_vtx_selectors[region]->passCutLt("volPos_bot", p_pos.Py(), weight)) + continue; + + if (!_reg_vtx_selectors[region]->passCutLt("deltaZ_lt", std::abs((ele_trk.getZ0()/ele_trk.getTanLambda()) - (pos_trk.getZ0()/pos_trk.getTanLambda())), weight)) + continue; + + //If this is MC check if MCParticle matched to the electron track is from rad or recoil 
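The MC block that follows matches the electron track to the MC particle that contributed the most hits, then checks whether that particle's mother PDG tags it as the radiated electron (isRadPDG_, default 622) or the recoil electron (623). A compact sketch of the majority vote, with simplified types standing in for what Track::getMcpHits() provides:

#include <cstdio>
#include <map>
#include <utility>
#include <vector>

// Return the MC-particle ID contributing the most hits to a track, or -1 if none.
// The (hit index, particle ID) pairs stand in for the real hit-association data.
int matchByHitCount(const std::vector<std::pair<int, int>>& mcpHits) {
    std::map<int, int> nHits4part;
    for (const auto& hit : mcpHits) ++nHits4part[hit.second];   // count hits per particle ID
    int maxNHits = 0, maxID = -1;
    for (const auto& kv : nHits4part)
        if (kv.second > maxNHits) { maxNHits = kv.second; maxID = kv.first; }
    return maxID;
}

int main() {
    // Illustrative hits: particle 42 dominates, particle 7 contributes one hit.
    const std::vector<std::pair<int, int>> hits = {{1, 42}, {2, 42}, {3, 7}, {4, 42}};
    std::printf("matched MC particle ID: %d\n", matchByHitCount(hits));
    return 0;
}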
+ if(!isData_) + { + //Fill MC plots after all selections + _reg_mc_vtx_histos[region]->FillMCParticles(mcParts_, analysis_); + + //Count the number of hits per part on the ele track + std::map nHits4part; + for(int i =0; i < ele_trk.getMcpHits().size(); i++) + { + int partID = ele_trk.getMcpHits().at(i).second; + if ( nHits4part.find(partID) == nHits4part.end() ) + { + // not found + nHits4part[partID] = 1; + } + else + { + // found + nHits4part[partID]++; + } + } + + //Determine the MC part with the most hits on the track + int maxNHits = 0; + int maxID = 0; + for (std::map::iterator it=nHits4part.begin(); it!=nHits4part.end(); ++it) + { + if(it->second > maxNHits) + { + maxNHits = it->second; + maxID = it->first; + } + } + + //Find the correct mc part and grab mother id + int isRadEle = -999; + int isRecEle = -999; + + + trueEleP.SetXYZ(-999,-999,-999); + truePosP.SetXYZ(-999,-999,-999); + if (mcParts_) { + float trueEleE = -1; + float truePosE = -1; + for(int i = 0; i < mcParts_->size(); i++) + { + int momPDG = mcParts_->at(i)->getMomPDG(); + if(mcParts_->at(i)->getPDG() == 11 && momPDG == isRadPDG_) + { + std::vector lP = mcParts_->at(i)->getMomentum(); + trueEleP.SetXYZ(lP[0],lP[1],lP[2]); + trueEleE = mcParts_->at(i)->getEnergy(); + + } + if(mcParts_->at(i)->getPDG() == -11 && momPDG == isRadPDG_) + { + std::vector lP = mcParts_->at(i)->getMomentum(); + truePosP.SetXYZ(lP[0],lP[1],lP[2]); + truePosE = mcParts_->at(i)->getEnergy(); + + } + if(trueEleP.X() != -999 && truePosP.X() != -999){ + truePsum = trueEleP.Mag() + trueEleP.Mag(); + trueEsum = trueEleE + truePosE; + } + + if(mcParts_->at(i)->getID() != maxID) continue; + //Default isRadPDG = 622 + if(momPDG == isRadPDG_) isRadEle = 1; + if(momPDG == 623) isRecEle = 1; + } + } + double momRatio = recEleP.Mag() / trueEleP.Mag(); + double momAngle = trueEleP.Angle(recEleP) * TMath::RadToDeg(); + if (!_reg_vtx_selectors[region]->passCutLt("momRatio_lt", momRatio, weight)) continue; + if (!_reg_vtx_selectors[region]->passCutGt("momRatio_gt", momRatio, weight)) continue; + if (!_reg_vtx_selectors[region]->passCutLt("momAngle_lt", momAngle, weight)) continue; + + if (!_reg_vtx_selectors[region]->passCutEq("isRadEle_eq", isRadEle, weight)) continue; + if (!_reg_vtx_selectors[region]->passCutEq("isNotRadEle_eq", isRadEle, weight)) continue; + if (!_reg_vtx_selectors[region]->passCutEq("isRecEle_eq", isRecEle, weight)) continue; + } + + goodVtx = vtx; + nGoodVtx++; + goodVtxs.push_back(vtx); + } // selected vertices + + //N selected vertices - this is quite a silly cut to make at the end. But okay. that's how we decided atm. + if (!_reg_vtx_selectors[region]->passCutEq("nVtxs_eq", nGoodVtx, weight)) + continue; + //Move to after N vertices cut (was filled before) + _reg_vtx_histos[region]->Fill1DHisto("n_vertices_h", nGoodVtx, weight); + + //Loop over all selected vertices in the region + for(std::vector::iterator it = goodVtxs.begin(); it != goodVtxs.end(); it++){ + + Vertex* vtx = *it; + + Particle* ele = nullptr; + Particle* pos = nullptr; + + if (!vtx || !_ah->GetParticlesFromVtx(vtx,ele,pos)) + continue; + + CalCluster eleClus = ele->getCluster(); + CalCluster posClus = pos->getCluster(); + + double corr_eleClusterTime = ele->getCluster().getTime() - timeOffset_; + double corr_posClusterTime = pos->getCluster().getTime() - timeOffset_; + + double ele_E = ele->getEnergy(); + double pos_E = pos->getEnergy(); + + //Compute analysis variables here. 
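Among the analysis variables computed below, the first momentum step is the B-field fix: when bFieldScaleFactor_ is positive, the track momenta are rescaled before the nominal bias correction to compensate for a wrong magnetic field used at reco/ntuple time. For a fixed measured curvature the transverse momentum is proportional to the field (pT of roughly 0.3 * B[T] * R[m] GeV), so the momentum scales linearly with the field ratio; a hedged sketch of that rescale (the real TrackBiasingTool may do more than a flat scale):

#include <cstdio>

// Sketch only: rescale a momentum reconstructed with the wrong B field by the
// field ratio, since p scales linearly with B at fixed curvature.
double rescaleP(double pReco, double fieldRatio) { return pReco * fieldRatio; }

int main() {
    const double pReco = 1.000;       // GeV, invented
    const double fieldRatio = 1.04;   // invented B_true / B_used, not the real factor
    std::printf("corrected p = %.3f GeV\n", rescaleP(pReco, fieldRatio));
    return 0;
}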
+ Track ele_trk = ele->getTrack(); + Track pos_trk = pos->getTrack(); + //Get the shared info - TODO change and improve + // + //Apply Track Bias Corrections + for (const auto& pair : trackBiasCorrections_){ + ele_trk.applyCorrection(pair.first, pair.second); + pos_trk.applyCorrection(pair.first, pair.second); + } + /* + //Track Time Corrections + ele_trk.applyCorrection("track_time",eleTrackTimeBias_); + pos_trk.applyCorrection("track_time", posTrackTimeBias_); + ele_trk.applyCorrection("z0", beamPosCorrections_.at(1)); + pos_trk.applyCorrection("z0", beamPosCorrections_.at(1)); + */ + + // Track Momentum bias + + if (biasingTool_) { + + // Correct for wrong track momentum - Bug Fix + // In case there was mis-configuration during reco/hpstr-ntuple step, correct + // the momentum magnitude here using the right bField for the data taking year + + if (bFieldScaleFactor_ > 0) { + biasingTool_->updateWithBiasP(ele_trk,bFieldScaleFactor_); + biasingTool_->updateWithBiasP(pos_trk,bFieldScaleFactor_); + } + + + biasingTool_->updateWithBiasP(ele_trk); + biasingTool_->updateWithBiasP(pos_trk); + } + + + double invm_smear = 1.; + //std::cout << "[Good Vtxs] ele track p before smearing: " << ele_trk.getP() << std::endl; + if (smearingTool_) { + double unsmeared_prod = ele_trk.getP()*pos_trk.getP(); + double ele_smf = smearingTool_->updateWithSmearP(ele_trk); + double pos_smf = smearingTool_->updateWithSmearP(pos_trk); + double smeared_prod = ele_trk.getP()*pos_trk.getP(); + invm_smear = sqrt(smeared_prod/unsmeared_prod); + smearingTool_->updateVertexWithSmearP(vtx, ele_smf, pos_smf); + } + //std::cout << "[Good Vtxs] ele track p after smearing: " << ele_trk.getP() << std::endl; + + //Get the layers hit on each track + std::vector ele_hit_layers = ele_trk.getHitLayers(); + int ele_Si0 = 0; + int ele_Si1 = 0; + int ele_lastlayer = 0; + for(int i=0; i pos_hit_layers = pos_trk.getHitLayers(); + int pos_Si0 = 0; + int pos_Si1 = 0; + int pos_lastlayer = 0; + for(int i=0; i vtx_cov = vtx->getCovariance(); + float cxx = vtx_cov.at(0); + float cyx = vtx_cov.at(1); + float cyy = vtx_cov.at(2); + float czx = vtx_cov.at(3); + float czy = vtx_cov.at(4); + float czz = vtx_cov.at(5); + + + //MC Truth hits in first 4 sensors + int L1L2hitCode = 0; //hit code '1111' means truth ax+ster hits in L1_ele, L1_pos, L2_ele, L2_pos + int L1hitCode = 0; //hit code '1111' means truth in L1_ele_ax, L1_ele_ster, L1_pos_ax, L1_pos_ster + int L2hitCode = 0; // hit code '1111' means truth in L2_ele_ax, L2_ele_ster, L2_pos_ax, L2_pos_ster + if(!isData_){ + //Get hit codes. Only sure this works for 2016 KF as is. 
+ utils::get2016KFMCTruthHitCodes(&ele_trk, &pos_trk, L1L2hitCode, L1hitCode, L2hitCode); + //L1L2 truth hit selection + if (!_reg_vtx_selectors[region]->passCutLt("hitCode_lt",((double)L1L2hitCode)-0.5, weight)) continue; + if (!_reg_vtx_selectors[region]->passCutGt("hitCode_gt",((double)L1L2hitCode)+0.5, weight)) continue; + //Fil hitcodes + _reg_vtx_histos[region]->Fill1DHisto("hitCode_h", L1L2hitCode,weight); + _reg_vtx_histos[region]->Fill1DHisto("L1hitCode_h", L1hitCode,weight); + _reg_vtx_histos[region]->Fill1DHisto("L2hitCode_h", L2hitCode,weight); + } + + //track isolations + //Only calculate isolations if both track L1 and L2 hits exist + bool hasL1ele = false; + bool hasL2ele = false; + _ah->InnermostLayerCheck(&ele_trk, hasL1ele, hasL2ele); + + bool hasL1pos = false; + bool hasL2pos = false; + _ah->InnermostLayerCheck(&pos_trk, hasL1pos, hasL2pos); + + TVector3 ele_mom; + //ele_mom.SetX(ele->getMomentum()[0]); + //ele_mom.SetY(ele->getMomentum()[1]); + //ele_mom.SetZ(ele->getMomentum()[2]); + ele_mom.SetX(ele_trk.getMomentum()[0]); + ele_mom.SetY(ele_trk.getMomentum()[1]); + ele_mom.SetZ(ele_trk.getMomentum()[2]); + + + TVector3 pos_mom; + //pos_mom.SetX(pos->getMomentum()[0]); + //pos_mom.SetY(pos->getMomentum()[1]); + //pos_mom.SetZ(pos->getMomentum()[2]); + pos_mom.SetX(pos_trk.getMomentum()[0]); + pos_mom.SetY(pos_trk.getMomentum()[1]); + pos_mom.SetZ(pos_trk.getMomentum()[2]); + + double ele_pos_dt = corr_eleClusterTime - corr_posClusterTime; + double psum = ele_mom.Mag()+pos_mom.Mag(); + + //Ele nHits + int ele2dHits = ele_trk.getTrackerHitCount(); + if (!ele_trk.isKalmanTrack()) + ele2dHits*=2; + + //pos nHits + int pos2dHits = pos_trk.getTrackerHitCount(); + + if(ts_ != nullptr) + { + _reg_vtx_histos[region]->Fill2DHisto("trig_count_hh", + ((int)ts_->prescaled.Single_3_Top)+((int)ts_->prescaled.Single_3_Bot), + ((int)ts_->prescaled.Single_2_Top)+((int)ts_->prescaled.Single_2_Bot)); + } + _reg_vtx_histos[region]->Fill1DHisto("n_vtx_h", vtxs_->size()); + + //Add the momenta to the tracks + //ele_trk.setMomentum(ele->getMomentum()[0],ele->getMomentum()[1],ele->getMomentum()[2]); + //pos_trk.setMomentum(pos->getMomentum()[0],pos->getMomentum()[1],pos->getMomentum()[2]); + TVector3 recEleP(ele->getMomentum()[0],ele->getMomentum()[1],ele->getMomentum()[2]); + TLorentzVector p_ele; + p_ele.SetPxPyPzE(ele_trk.getMomentum()[0],ele_trk.getMomentum()[1],ele_trk.getMomentum()[2], ele_E); + TLorentzVector p_pos; + p_pos.SetPxPyPzE(pos_trk.getMomentum()[0],pos_trk.getMomentum()[1],pos_trk.getMomentum()[2], pos_E); + + + _reg_vtx_histos[region]->Fill2DHistograms(vtx,weight); + _reg_vtx_histos[region]->Fill1DVertex(vtx, + ele, + pos, + &ele_trk, + &pos_trk, + weight); + + _reg_vtx_histos[region]->Fill1DHisto("ele_pos_clusTimeDiff_h", (corr_eleClusterTime - corr_posClusterTime), weight); + _reg_vtx_histos[region]->Fill1DHisto("ele_track_n2dhits_h", ele2dHits, weight); + _reg_vtx_histos[region]->Fill1DHisto("pos_track_n2dhits_h", pos2dHits, weight); + _reg_vtx_histos[region]->Fill1DHisto("vtx_Psum_h", p_ele.P()+p_pos.P(), weight); + _reg_vtx_histos[region]->Fill1DHisto("vtx_Esum_h", eleClus.getEnergy()+posClus.getEnergy(), weight); + _reg_vtx_histos[region]->Fill1DHisto("vtx_smear_InvM_h", invm_smear*(vtx->getInvMass()), weight); + _reg_vtx_histos[region]->Fill2DHisto("ele_vtxZ_iso_hh", TMath::Min(ele_trk.getIsolation(0), ele_trk.getIsolation(1)), vtx->getZ(), weight); + _reg_vtx_histos[region]->Fill2DHisto("pos_vtxZ_iso_hh", TMath::Min(pos_trk.getIsolation(0), pos_trk.getIsolation(1)), 
vtx->getZ(), weight); + _reg_vtx_histos[region]->Fill2DTrack(&ele_trk,weight,"ele_"); + _reg_vtx_histos[region]->Fill2DTrack(&pos_trk,weight,"pos_"); + _reg_vtx_histos[region]->Fill1DHisto("mcMass622_h",apMass); + _reg_vtx_histos[region]->Fill1DHisto("mcZ622_h",apZ); + _reg_vtx_histos[region]->Fill1DHisto("mcMass625_h",vdMass); + _reg_vtx_histos[region]->Fill1DHisto("mcZ625_h",vdZ); + + + //Just for the selected vertex + if(!isData_) + { + _reg_vtx_histos[region]->Fill2DHisto("vtx_Esum_vs_true_Esum_hh",eleClus.getEnergy()+posClus.getEnergy(), trueEsum, weight); + _reg_vtx_histos[region]->Fill2DHisto("vtx_Psum_vs_true_Psum_hh",p_ele.P()+p_pos.P(), truePsum, weight); + _reg_vtx_histos[region]->Fill1DHisto("true_vtx_psum_h",truePsum,weight); + } + + double reconz = vtx->getZ(); + double ele_trk_z0 = ele_trk.getZ0(); + double ele_trk_z0err = ele_trk.getZ0Err(); + double pos_trk_z0 = pos_trk.getZ0(); + double pos_trk_z0err = pos_trk.getZ0Err(); + + //DeltaZ + double deltaZ = std::abs( (ele_trk_z0/ele_trk.getTanLambda()) - (pos_trk_z0/pos_trk.getTanLambda()) ); + + //Project vertex to target + double vtx_proj_x = -999.9; + double vtx_proj_y = -999.9; + double vtx_proj_x_sig = -999.9; + double vtx_proj_y_sig = -999.9; + double vtx_proj_sig = -999.9; + if(!v0ProjectionFitsCfg_.empty()) + vtx_proj_sig = utils::v0_projection_to_target_significance(v0proj_fits_, evth_->getRunNumber(), + vtx_proj_x, vtx_proj_y, vtx_proj_x_sig, vtx_proj_y_sig, vtx->getX(), vtx->getY(), + reconz, vtx->getP().X(), vtx->getP().Y(), vtx->getP().Z()); + + _reg_vtx_histos[region]->Fill2DHisto("unc_vtx_x_v_unc_vtx_y_hh", vtx->getX(), vtx->getY()); + _reg_vtx_histos[region]->Fill2DHisto("unc_vtx_proj_x_v_unc_vtx_proj_y_hh", vtx_proj_x, vtx_proj_y); + _reg_vtx_histos[region]->Fill2DHisto("unc_vtx_proj_x_y_significance_hh", vtx_proj_x_sig, vtx_proj_y_sig); + _reg_vtx_histos[region]->Fill2DHisto("recon_z_v_vtx_proj_significance_hh", vtx_proj_sig, reconz); + _reg_vtx_histos[region]->Fill2DHisto("vtx_track_reconz_v_Z0err_hh", ele_trk_z0err, reconz); + _reg_vtx_histos[region]->Fill2DHisto("vtx_track_reconz_v_Z0err_hh", pos_trk_z0err, reconz); + _reg_vtx_histos[region]->Fill2DHisto("recon_z_v_z0_hh", ele_trk_z0, reconz); + _reg_vtx_histos[region]->Fill2DHisto("recon_z_v_z0_hh", pos_trk_z0, reconz); + _reg_vtx_histos[region]->Fill2DHisto("vtx_track_recon_z_v_ABSdz0tanlambda_hh", std::abs((ele_trk_z0/ele_trk.getTanLambda()) - (pos_trk_z0/pos_trk.getTanLambda())), reconz); + _reg_vtx_histos[region]->Fill2DHisto("vtx_track_recon_z_v_dz0tanlambda_hh", ((ele_trk_z0/ele_trk.getTanLambda()) - (pos_trk_z0/pos_trk.getTanLambda())), reconz); + + _reg_vtx_histos[region]->Fill2DHisto("recon_z_v_cxx_hh", cxx, reconz); + _reg_vtx_histos[region]->Fill2DHisto("recon_z_v_cyy_hh", cyy, reconz); + _reg_vtx_histos[region]->Fill2DHisto("recon_z_v_czz_hh", czz, reconz); + _reg_vtx_histos[region]->Fill2DHisto("recon_z_v_cyx_hh", cyx, reconz); + _reg_vtx_histos[region]->Fill2DHisto("recon_z_v_czx_hh", czx, reconz); + _reg_vtx_histos[region]->Fill2DHisto("recon_z_v_czy_hh", czy, reconz); + _reg_vtx_histos[region]->Fill1DHisto("cxx_h", cxx); + _reg_vtx_histos[region]->Fill1DHisto("cyy_h", cyy); + _reg_vtx_histos[region]->Fill1DHisto("czz_h", czz); + _reg_vtx_histos[region]->Fill1DHisto("cyx_h", cyx); + _reg_vtx_histos[region]->Fill1DHisto("czx_h", czx); + _reg_vtx_histos[region]->Fill1DHisto("czy_h", czy); + _reg_vtx_histos[region]->Fill2DHisto("vtx_track_recon_z_v_z0tanlambda_hh", ele_trk_z0/ele_trk.getTanLambda(), reconz); + 
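The z0/tanLambda quantities filled just above and right below project each track back along its slope toward the beam plane, and deltaZ (computed earlier in this block) is the absolute difference of the electron and positron projections, |z0_e/tanLambda_e - z0_p/tanLambda_p|. A small sketch under that definition, with invented track parameters:

#include <cmath>
#include <cstdio>

// deltaZ as defined above: |z0_e/tanLambda_e - z0_p/tanLambda_p|.
double deltaZ(double eleZ0, double eleTanLambda, double posZ0, double posTanLambda) {
    return std::fabs(eleZ0 / eleTanLambda - posZ0 / posTanLambda);
}

int main() {
    // Invented track parameters (z0 in mm, tanLambda unitless).
    const double eleZ0 = 0.12,  eleTanL = 0.030;
    const double posZ0 = -0.08, posTanL = -0.025;
    std::printf("deltaZ = %.2f mm\n", deltaZ(eleZ0, eleTanL, posZ0, posTanL));
    return 0;
}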
_reg_vtx_histos[region]->Fill2DHisto("vtx_track_recon_z_v_z0tanlambda_hh", pos_trk_z0/pos_trk.getTanLambda(), reconz); + _reg_vtx_histos[region]->Fill2DHisto("ele_clusT_v_ele_trackT_hh", ele_trk.getTrackTime(), corr_eleClusterTime, weight); + _reg_vtx_histos[region]->Fill2DHisto("pos_clusT_v_pos_trackT_hh", pos_trk.getTrackTime(), corr_posClusterTime, weight); + _reg_vtx_histos[region]->Fill2DHisto("ele_track_time_v_P_hh", ele_trk.getP(), ele_trk.getTrackTime(), weight); + _reg_vtx_histos[region]->Fill2DHisto("pos_track_time_v_P_hh", pos_trk.getP(), pos_trk.getTrackTime(), weight); + _reg_vtx_histos[region]->Fill2DHisto("ele_pos_clusTimeDiff_v_pSum_hh",ele_mom.Mag()+pos_mom.Mag(), ele_pos_dt, weight); + _reg_vtx_histos[region]->Fill2DHisto("ele_cluster_energy_v_track_p_hh",ele_trk.getP(), eleClus.getEnergy(), weight); + _reg_vtx_histos[region]->Fill2DHisto("pos_cluster_energy_v_track_p_hh",pos_trk.getP(), posClus.getEnergy(), weight); + _reg_vtx_histos[region]->Fill2DHisto("ele_track_cluster_dt_v_EoverP_hh",eleClus.getEnergy()/ele_trk.getP(), ele_trk.getTrackTime() - corr_eleClusterTime, weight); + _reg_vtx_histos[region]->Fill2DHisto("pos_track_cluster_dt_v_EoverP_hh",posClus.getEnergy()/pos_trk.getP(), pos_trk.getTrackTime() - corr_posClusterTime, weight); + _reg_vtx_histos[region]->Fill2DHisto("ele_track_clus_dt_v_p_hh",ele_trk.getP(), ele_trk.getTrackTime() - corr_eleClusterTime, weight); + _reg_vtx_histos[region]->Fill2DHisto("pos_track_clus_dt_v_p_hh",pos_trk.getP(), pos_trk.getTrackTime() - corr_posClusterTime, weight); + _reg_vtx_histos[region]->Fill2DHisto("ele_z0_vs_pos_z0_hh",ele_trk.getZ0(), pos_trk.getZ0(), weight); + + //chi2 2d plots + _reg_vtx_histos[region]->Fill2DHisto("ele_track_chi2ndf_v_time_hh", ele_trk.getTrackTime(), ele_trk.getChi2Ndf(), weight); + _reg_vtx_histos[region]->Fill2DHisto("ele_track_chi2ndf_v_p_hh", ele_trk.getP(), ele_trk.getChi2Ndf(), weight); + _reg_vtx_histos[region]->Fill2DHisto("ele_track_chi2ndf_v_tanlambda_hh", ele_trk.getTanLambda(), ele_trk.getChi2Ndf(), weight); + _reg_vtx_histos[region]->Fill2DHisto("ele_track_chi2ndf_v_n2dhits_hh", ele2dHits, ele_trk.getChi2Ndf(), weight); + + _reg_vtx_histos[region]->Fill2DHisto("pos_track_chi2ndf_v_time_hh", pos_trk.getTrackTime(), pos_trk.getChi2Ndf(), weight); + _reg_vtx_histos[region]->Fill2DHisto("pos_track_chi2ndf_v_p_hh", pos_trk.getP(), pos_trk.getChi2Ndf(), weight); + _reg_vtx_histos[region]->Fill2DHisto("pos_track_chi2ndf_v_tanlambda_hh", pos_trk.getTanLambda(), pos_trk.getChi2Ndf(), weight); + _reg_vtx_histos[region]->Fill2DHisto("pos_track_chi2ndf_v_n2dhits_hh", pos2dHits, pos_trk.getChi2Ndf(), weight); + + _reg_vtx_histos[region]->Fill2DHisto("ele_track_p_v_tanlambda_hh", ele_trk.getTanLambda(), ele_trk.getP(), weight); + _reg_vtx_histos[region]->Fill2DHisto("pos_track_p_v_tanlambda_hh", pos_trk.getTanLambda(), pos_trk.getP(), weight); + + + //1d histos + _reg_vtx_histos[region]->Fill1DHisto("ele_track_clus_dt_h", ele_trk.getTrackTime() - corr_eleClusterTime, weight); + _reg_vtx_histos[region]->Fill1DHisto("pos_track_clus_dt_h", pos_trk.getTrackTime() - corr_posClusterTime, weight); + + + //TODO put this in the Vertex! 
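The TODO just above refers to the rotation that follows: the vertex position is rotated about the Y axis by -0.0305 rad before its z is written to the tuple, presumably moving it from the detector frame into the rotated SVT/beam frame (the roughly 30.5 mrad tilt). A plain-math sketch of the same rotation without ROOT, using the convention TVector3::RotateY follows (x' = c*x + s*z, y' = y, z' = c*z - s*x):

#include <cmath>
#include <cstdio>

struct Vec3 { double x, y, z; };

// Rotate a point about the Y axis by 'angle' radians (TVector3::RotateY convention).
Vec3 rotateY(const Vec3& v, double angle) {
    const double c = std::cos(angle), s = std::sin(angle);
    return {c * v.x + s * v.z, v.y, c * v.z - s * v.x};
}

int main() {
    // Invented vertex position (mm); the rotation angle matches the -0.0305 rad used above.
    const Vec3 vtx = {0.2, -0.1, 35.0};
    const Vec3 svt = rotateY(vtx, -0.0305);
    std::printf("z before = %.3f mm, z after = %.3f mm\n", vtx.z, svt.z);
    return 0;
}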
+ TVector3 vtxPosSvt; + vtxPosSvt.SetX(vtx->getX()); + vtxPosSvt.SetY(vtx->getY()); + vtxPosSvt.SetZ(vtx->getZ()); + vtxPosSvt.RotateY(-0.0305); + + //Just for the selected vertex + if (makeFlatTuple_){ + if(!isData_){ + _reg_tuples[region]->setVariableValue("ap_true_vtx_z", apZ); + _reg_tuples[region]->setVariableValue("ap_true_vtx_mass", apMass); + _reg_tuples[region]->setVariableValue("ap_true_vtx_energy", apEnergy); + _reg_tuples[region]->setVariableValue("vd_true_vtx_z", vdZ); + _reg_tuples[region]->setVariableValue("vd_true_vtx_mass", vdMass); + _reg_tuples[region]->setVariableValue("vd_true_vtx_energy", vdEnergy); + _reg_tuples[region]->setVariableValue("hitCode", float(L1L2hitCode)); + _reg_tuples[region]->setVariableValue("L1hitCode", float(L1hitCode)); + _reg_tuples[region]->setVariableValue("L2hitCode", float(L2hitCode)); + } + + _reg_tuples[region]->setVariableValue("unc_vtx_mass", vtx->getInvMass()); + _reg_tuples[region]->setVariableValue("unc_vtx_z" , vtxPosSvt.Z()); + _reg_tuples[region]->setVariableValue("unc_vtx_chi2", vtx->getChi2()); + _reg_tuples[region]->setVariableValue("unc_vtx_psum", p_ele.P()+p_pos.P()); + _reg_tuples[region]->setVariableValue("unc_vtx_px", vtx->getP().X()); + _reg_tuples[region]->setVariableValue("unc_vtx_py", vtx->getP().Y()); + _reg_tuples[region]->setVariableValue("unc_vtx_pz", vtx->getP().Z()); + _reg_tuples[region]->setVariableValue("unc_vtx_x", vtx->getX()); + _reg_tuples[region]->setVariableValue("unc_vtx_y", vtx->getY()); + _reg_tuples[region]->setVariableValue("unc_vtx_proj_x", vtx_proj_x); + _reg_tuples[region]->setVariableValue("unc_vtx_proj_y", vtx_proj_y); + _reg_tuples[region]->setVariableValue("unc_vtx_proj_x_sig", vtx_proj_x_sig); + _reg_tuples[region]->setVariableValue("unc_vtx_proj_y_sig", vtx_proj_y_sig); + _reg_tuples[region]->setVariableValue("unc_vtx_proj_sig", vtx_proj_sig); + _reg_tuples[region]->setVariableValue("unc_vtx_ele_pos_clust_dt", corr_eleClusterTime - corr_posClusterTime); + + _reg_tuples[region]->setVariableValue("unc_vtx_cxx", cxx); + _reg_tuples[region]->setVariableValue("unc_vtx_cyy", cyy); + _reg_tuples[region]->setVariableValue("unc_vtx_czz", czz); + _reg_tuples[region]->setVariableValue("unc_vtx_cyx", cyx); + _reg_tuples[region]->setVariableValue("unc_vtx_czy", czy); + _reg_tuples[region]->setVariableValue("unc_vtx_czx", czx); + _reg_tuples[region]->setVariableValue("unc_vtx_deltaZ", deltaZ); + + //track vars + _reg_tuples[region]->setVariableValue("unc_vtx_ele_track_p", ele_trk.getP()); + _reg_tuples[region]->setVariableValue("unc_vtx_ele_track_t", ele_trk.getTrackTime()); + _reg_tuples[region]->setVariableValue("unc_vtx_ele_track_d0", ele_trk.getD0()); + _reg_tuples[region]->setVariableValue("unc_vtx_ele_track_phi0", ele_trk.getPhi()); + _reg_tuples[region]->setVariableValue("unc_vtx_ele_track_omega", ele_trk.getOmega()); + _reg_tuples[region]->setVariableValue("unc_vtx_ele_track_tanLambda", ele_trk.getTanLambda()); + _reg_tuples[region]->setVariableValue("unc_vtx_ele_track_z0", ele_trk.getZ0()); + _reg_tuples[region]->setVariableValue("unc_vtx_ele_track_chi2ndf", ele_trk.getChi2Ndf()); + _reg_tuples[region]->setVariableValue("unc_vtx_ele_track_clust_dt", ele_trk.getTrackTime() - corr_eleClusterTime); + _reg_tuples[region]->setVariableValue("unc_vtx_ele_track_z0Err",ele_trk.getZ0Err()); + _reg_tuples[region]->setVariableValue("unc_vtx_ele_track_d0Err", ele_trk.getD0Err()); + _reg_tuples[region]->setVariableValue("unc_vtx_ele_track_tanLambdaErr", ele_trk.getTanLambdaErr()); + 
_reg_tuples[region]->setVariableValue("unc_vtx_ele_track_PhiErr", ele_trk.getPhiErr()); + _reg_tuples[region]->setVariableValue("unc_vtx_ele_track_OmegaErr", ele_trk.getOmegaErr()); + _reg_tuples[region]->setVariableValue("unc_vtx_ele_track_nhits",ele2dHits); + _reg_tuples[region]->setVariableValue("unc_vtx_ele_track_lastlayer",ele_lastlayer); + _reg_tuples[region]->setVariableValue("unc_vtx_ele_track_si0",ele_Si0); + _reg_tuples[region]->setVariableValue("unc_vtx_ele_track_si1",ele_Si1); + + _reg_tuples[region]->setVariableValue("unc_vtx_pos_track_p", pos_trk.getP()); + _reg_tuples[region]->setVariableValue("unc_vtx_pos_track_t", pos_trk.getTrackTime()); + _reg_tuples[region]->setVariableValue("unc_vtx_pos_track_d0", pos_trk.getD0()); + _reg_tuples[region]->setVariableValue("unc_vtx_pos_track_phi0", pos_trk.getPhi()); + _reg_tuples[region]->setVariableValue("unc_vtx_pos_track_omega", pos_trk.getOmega()); + _reg_tuples[region]->setVariableValue("unc_vtx_pos_track_tanLambda", pos_trk.getTanLambda()); + _reg_tuples[region]->setVariableValue("unc_vtx_pos_track_z0", pos_trk.getZ0()); + _reg_tuples[region]->setVariableValue("unc_vtx_pos_track_chi2ndf", pos_trk.getChi2Ndf()); + _reg_tuples[region]->setVariableValue("unc_vtx_pos_track_clust_dt", pos_trk.getTrackTime() - corr_posClusterTime); + _reg_tuples[region]->setVariableValue("unc_vtx_pos_track_z0Err",pos_trk.getZ0Err()); + _reg_tuples[region]->setVariableValue("unc_vtx_pos_track_d0Err", pos_trk.getD0Err()); + _reg_tuples[region]->setVariableValue("unc_vtx_pos_track_tanLambdaErr", pos_trk.getTanLambdaErr()); + _reg_tuples[region]->setVariableValue("unc_vtx_pos_track_PhiErr", pos_trk.getPhiErr()); + _reg_tuples[region]->setVariableValue("unc_vtx_pos_track_OmegaErr", pos_trk.getOmegaErr()); + _reg_tuples[region]->setVariableValue("unc_vtx_pos_track_nhits",pos2dHits); + _reg_tuples[region]->setVariableValue("unc_vtx_pos_track_lastlayer",pos_lastlayer); + _reg_tuples[region]->setVariableValue("unc_vtx_pos_track_si0",pos_Si0); + _reg_tuples[region]->setVariableValue("unc_vtx_pos_track_si1",pos_Si1); + + //clust vars + _reg_tuples[region]->setVariableValue("unc_vtx_ele_clust_E", eleClus.getEnergy()); + _reg_tuples[region]->setVariableValue("unc_vtx_ele_clust_x", eleClus.getPosition().at(0)); + _reg_tuples[region]->setVariableValue("unc_vtx_ele_clust_corr_t",corr_eleClusterTime); + + _reg_tuples[region]->setVariableValue("unc_vtx_pos_clust_E", posClus.getEnergy()); + _reg_tuples[region]->setVariableValue("unc_vtx_pos_clust_x", posClus.getPosition().at(0)); + _reg_tuples[region]->setVariableValue("unc_vtx_pos_clust_corr_t",corr_posClusterTime); + _reg_tuples[region]->setVariableValue("run_number", evth_->getRunNumber()); + + _reg_tuples[region]->setVariableValue("unc_vtx_ele_track_ecal_x", ele_trk.getPositionAtEcal().at(0)); + _reg_tuples[region]->setVariableValue("unc_vtx_ele_track_ecal_y", ele_trk.getPositionAtEcal().at(1)); + _reg_tuples[region]->setVariableValue("unc_vtx_ele_track_z", ele_trk.getPosition().at(2)); + _reg_tuples[region]->setVariableValue("unc_vtx_pos_track_ecal_x", pos_trk.getPositionAtEcal().at(0)); + _reg_tuples[region]->setVariableValue("unc_vtx_pos_track_ecal_y", pos_trk.getPositionAtEcal().at(1)); + _reg_tuples[region]->setVariableValue("unc_vtx_pos_track_z", pos_trk.getPosition().at(2)); + _reg_tuples[region]->setVariableValue("unc_vtx_ele_track_px", ele_trk.getMomentum().at(0)); + _reg_tuples[region]->setVariableValue("unc_vtx_ele_track_py", ele_trk.getMomentum().at(1)); + 
_reg_tuples[region]->setVariableValue("unc_vtx_ele_track_pz", ele_trk.getMomentum().at(2)); + _reg_tuples[region]->setVariableValue("unc_vtx_pos_track_px", pos_trk.getMomentum().at(0)); + _reg_tuples[region]->setVariableValue("unc_vtx_pos_track_py", pos_trk.getMomentum().at(1)); + _reg_tuples[region]->setVariableValue("unc_vtx_pos_track_pz", pos_trk.getMomentum().at(2)); + + _reg_tuples[region]->fill(); + } + } + + }// regions + + return true; } void NewVertexAnaProcessor::finalize() { - //TODO clean this up a little. - outF_->cd(); - _vtx_histos->saveHistos(outF_,_vtx_histos->getName()); - outF_->cd(_vtx_histos->getName().c_str()); - vtxSelector->getCutFlowHisto()->Write(); - - outF_->cd(); - if(!isData_) - _mc_vtx_histos->saveHistos(outF_, _mc_vtx_histos->getName()); - //delete histos; - //histos = nullptr; - - - for (reg_it it = _reg_vtx_histos.begin(); it!=_reg_vtx_histos.end(); ++it) { - std::string dirName = anaName_+"_"+it->first; - (it->second)->saveHistos(outF_,dirName); - outF_->cd(dirName.c_str()); - _reg_vtx_selectors[it->first]->getCutFlowHisto()->Write(); - //Save tuples - if (makeFlatTuple_) - _reg_tuples[it->first]->writeTree(); - - } - - if(!isData_){ - for (reg_mc_it it = _reg_mc_vtx_histos.begin(); it!=_reg_mc_vtx_histos.end(); ++it) { - std::string dirName = anaName_+"_mc_"+it->first; - (it->second)->saveHistos(outF_,dirName); - outF_->cd(dirName.c_str()); - } - } - - outF_->Close(); + //TODO clean this up a little. + outF_->cd(); + _vtx_histos->saveHistos(outF_,_vtx_histos->getName()); + outF_->cd(_vtx_histos->getName().c_str()); + vtxSelector->getCutFlowHisto()->Write(); + + outF_->cd(); + if(!isData_) + _mc_vtx_histos->saveHistos(outF_, _mc_vtx_histos->getName()); + //delete histos; + //histos = nullptr; + + + for (reg_it it = _reg_vtx_histos.begin(); it!=_reg_vtx_histos.end(); ++it) { + std::string dirName = anaName_+"_"+it->first; + (it->second)->saveHistos(outF_,dirName); + outF_->cd(dirName.c_str()); + _reg_vtx_selectors[it->first]->getCutFlowHisto()->Write(); + //Save tuples + if (makeFlatTuple_) + _reg_tuples[it->first]->writeTree(); + + } + + if(!isData_){ + for (reg_mc_it it = _reg_mc_vtx_histos.begin(); it!=_reg_mc_vtx_histos.end(); ++it) { + std::string dirName = anaName_+"_mc_"+it->first; + (it->second)->saveHistos(outF_,dirName); + outF_->cd(dirName.c_str()); + } + } + + outF_->Close(); } From 6e4ad84985996826b658440ebba50ea342a60f45 Mon Sep 17 00:00:00 2001 From: Alic Shen Spellman Date: Tue, 18 Jun 2024 13:04:40 -0700 Subject: [PATCH 05/27] a few updates in here. Compatible with newest hps-java track data object. Add in mollers to vertex ana processor. 
Fix bug in invariant mass smearing
---
 analysis/include/AnaHelpers.h            | 11 ++
 analysis/src/AnaHelpers.cxx              | 32 +++++
 processors/src/NewVertexAnaProcessor.cxx | 32 ++++-
 processors/src/utilities.cxx             |  2 +-
 utils/src/TrackSmearingTool.cxx          | 168 +++++++++++------------
 5 files changed, 154 insertions(+), 91 deletions(-)

diff --git a/analysis/include/AnaHelpers.h b/analysis/include/AnaHelpers.h
index 241df6f2b..430fd76c0 100644
--- a/analysis/include/AnaHelpers.h
+++ b/analysis/include/AnaHelpers.h
@@ -83,6 +83,17 @@ class AnaHelpers {
      * @return false
      */
     bool GetParticlesFromVtx(Vertex* vtx, Particle*& ele, Particle*& pos);
+
+    /**
+     * @brief Get the Particles From Vtx object
+     *
+     * @param vtx
+     * @param ele1
+     * @param ele2
+     * @return true
+     * @return false
+     */
+    bool GetSameParticlesFromVtx(Vertex* vtx, Particle*& ele1, Particle*& ele2);

     /**
      * @brief brief description
diff --git a/analysis/src/AnaHelpers.cxx b/analysis/src/AnaHelpers.cxx
index 74f6dee13..07f954acc 100644
--- a/analysis/src/AnaHelpers.cxx
+++ b/analysis/src/AnaHelpers.cxx
@@ -87,6 +87,38 @@ bool AnaHelpers::MatchToGBLTracks(int ele_id, int pos_id, Track* & ele_trk, Trac
     return foundele * foundpos;
 }

+//Use this to get two electrons
+bool AnaHelpers::GetSameParticlesFromVtx(Vertex* vtx, Particle*& ele1, Particle*& ele2) {
+
+
+    bool foundele1 = false;
+    bool foundele2 = false;
+
+    for (int ipart = 0; ipart < vtx->getParticles().GetEntries(); ++ipart) {
+
+
+        int pdg_id = ((Particle*)vtx->getParticles().At(ipart))->getPDG();
+        if (debug_) std::cout<<"In Loop "<getParticles().At(ipart));
+                foundele1=true;
+            }
+            else{
+                ele2 = ((Particle*)vtx->getParticles().At(ipart));
+                foundele2=true;
+            }
+        }
+    }
+
+    if (!ele1 || !ele2) {
+        std::cout<<"Vertex formed without ele/ele. Skip."<addVariable("unc_vtx_ele_clust_E");
         _reg_tuples[regname]->addVariable("unc_vtx_ele_clust_x");
+        _reg_tuples[regname]->addVariable("unc_vtx_ele_clust_y");
         _reg_tuples[regname]->addVariable("unc_vtx_ele_clust_corr_t");

         _reg_tuples[regname]->addVariable("unc_vtx_pos_clust_E");
         _reg_tuples[regname]->addVariable("unc_vtx_pos_clust_x");
+        _reg_tuples[regname]->addVariable("unc_vtx_pos_clust_y");
         _reg_tuples[regname]->addVariable("unc_vtx_pos_clust_corr_t");

         if(!isData_)
@@ -392,7 +394,12 @@ bool NewVertexAnaProcessor::process(IEvent* ievent) {
         }

-        bool foundParts = _ah->GetParticlesFromVtx(vtx,ele,pos);
+        bool foundParts = false;
+        if(vtxColl_ == "UnconstrainedMollerVertices"){
+            foundParts = _ah->GetSameParticlesFromVtx(vtx, ele, pos);
+        }
+        else
+            foundParts = _ah->GetParticlesFromVtx(vtx,ele,pos);

         if (!foundParts) {
             if(debug_) std::cout<<"NewVertexAnaProcessor::WARNING::Found vtx without ele/pos. 
Skip."<updateWithSmearP(ele_trk); double pos_smf = smearingTool_->updateWithSmearP(pos_trk); @@ -716,7 +723,11 @@ bool NewVertexAnaProcessor::process(IEvent* ievent) { Particle* ele = nullptr; Particle* pos = nullptr; - _ah->GetParticlesFromVtx(vtx,ele,pos); + if(vtxColl_ == "UnconstrainedMollerVertices"){ + _ah->GetSameParticlesFromVtx(vtx, ele, pos); + } + else + _ah->GetParticlesFromVtx(vtx,ele,pos); CalCluster eleClus = ele->getCluster(); CalCluster posClus = pos->getCluster(); @@ -768,7 +779,7 @@ bool NewVertexAnaProcessor::process(IEvent* ievent) { double invm_smear = 1.; //std::cout << "[Region loop Vtxs] ele track p before smearing: " << ele_trk.getP() << std::endl; - if (smearingTool_) { + if (smearingTool_ and !isData_) { double unsmeared_prod = ele_trk.getP()*pos_trk.getP(); double ele_smf = smearingTool_->updateWithSmearP(ele_trk); double pos_smf = smearingTool_->updateWithSmearP(pos_trk); @@ -1143,7 +1154,14 @@ bool NewVertexAnaProcessor::process(IEvent* ievent) { Particle* ele = nullptr; Particle* pos = nullptr; - if (!vtx || !_ah->GetParticlesFromVtx(vtx,ele,pos)) + bool foundParts = false; + if(vtxColl_ == "UnconstrainedMollerVertices"){ + foundParts = _ah->GetSameParticlesFromVtx(vtx, ele, pos); + } + else + foundParts = _ah->GetParticlesFromVtx(vtx,ele,pos); + + if (!vtx || !foundParts) continue; CalCluster eleClus = ele->getCluster(); @@ -1194,7 +1212,7 @@ bool NewVertexAnaProcessor::process(IEvent* ievent) { double invm_smear = 1.; //std::cout << "[Good Vtxs] ele track p before smearing: " << ele_trk.getP() << std::endl; - if (smearingTool_) { + if (smearingTool_ and !isData_) { double unsmeared_prod = ele_trk.getP()*pos_trk.getP(); double ele_smf = smearingTool_->updateWithSmearP(ele_trk); double pos_smf = smearingTool_->updateWithSmearP(pos_trk); @@ -1508,10 +1526,12 @@ bool NewVertexAnaProcessor::process(IEvent* ievent) { //clust vars _reg_tuples[region]->setVariableValue("unc_vtx_ele_clust_E", eleClus.getEnergy()); _reg_tuples[region]->setVariableValue("unc_vtx_ele_clust_x", eleClus.getPosition().at(0)); + _reg_tuples[region]->setVariableValue("unc_vtx_ele_clust_y", eleClus.getPosition().at(1)); _reg_tuples[region]->setVariableValue("unc_vtx_ele_clust_corr_t",corr_eleClusterTime); _reg_tuples[region]->setVariableValue("unc_vtx_pos_clust_E", posClus.getEnergy()); _reg_tuples[region]->setVariableValue("unc_vtx_pos_clust_x", posClus.getPosition().at(0)); + _reg_tuples[region]->setVariableValue("unc_vtx_pos_clust_y", posClus.getPosition().at(1)); _reg_tuples[region]->setVariableValue("unc_vtx_pos_clust_corr_t",corr_posClusterTime); _reg_tuples[region]->setVariableValue("run_number", evth_->getRunNumber()); diff --git a/processors/src/utilities.cxx b/processors/src/utilities.cxx index 4ba738858..c69e75975 100644 --- a/processors/src/utilities.cxx +++ b/processors/src/utilities.cxx @@ -296,7 +296,7 @@ Track* utils::buildTrack(EVENT::Track* lc_track, // Check that the TrackData data structure is correct. If it's // not, throw a runtime exception. 
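         // note: the float-count bound in the check below is raised from 7 to 8, presumably to
         // accept the extra per-track float carried by the newer hps-java TrackData object that
         // this patch series targets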
- if (track_datum->getNDouble() > 14 || track_datum->getNFloat() > 7 || track_datum->getNInt() != 1) { + if (track_datum->getNDouble() > 14 || track_datum->getNFloat() > 8 || track_datum->getNInt() != 1) { throw std::runtime_error("[ TrackingProcessor ]: The collection " + std::string(Collections::TRACK_DATA) + " has the wrong structure."); diff --git a/utils/src/TrackSmearingTool.cxx b/utils/src/TrackSmearingTool.cxx index 24949249e..dfadd3a5f 100644 --- a/utils/src/TrackSmearingTool.cxx +++ b/utils/src/TrackSmearingTool.cxx @@ -5,103 +5,103 @@ #include TrackSmearingTool::TrackSmearingTool(const std::string& smearingfile, - const bool relSmearing, - const int seed, - const std::string& tracks){ - - - relSmearing_ = relSmearing; - std::string hsuffix = relSmearing_ ? "_rel" : ""; - smearingfile_ = std::make_shared(smearingfile.c_str()); - - if (!smearingfile_) - throw std::invalid_argument( "Provided input smearing file does not exists"); - - //cache the smearing histograms - smearing_histo_top_ = (TH1D*) smearingfile_->Get((tracks+"_p_vs_nHits_hh_smearing"+hsuffix).c_str()); - smearing_histo_bot_ = (TH1D*) smearingfile_->Get((tracks+"_p_vs_nHits_hh_smearing"+hsuffix).c_str()); - - if (!smearing_histo_top_ || !smearing_histo_bot_) - throw std::invalid_argument("Top and Bottom smearing histograms not found in smearing file"); - - //setup random engine - if (debug_) - std::cout<<"Setting up random engine with seed "<(seed); - - normal_ = std::make_shared>(0.,1.); - + const bool relSmearing, + const int seed, + const std::string& tracks){ + + + relSmearing_ = relSmearing; + std::string hsuffix = relSmearing_ ? "_rel" : ""; + smearingfile_ = std::make_shared(smearingfile.c_str()); + + if (!smearingfile_) + throw std::invalid_argument( "Provided input smearing file does not exists"); + + //cache the smearing histograms + smearing_histo_top_ = (TH1D*) smearingfile_->Get((tracks+"_p_vs_nHits_hh_smearing"+hsuffix).c_str()); + smearing_histo_bot_ = (TH1D*) smearingfile_->Get((tracks+"_p_vs_nHits_hh_smearing"+hsuffix).c_str()); + + if (!smearing_histo_top_ || !smearing_histo_bot_) + throw std::invalid_argument("Top and Bottom smearing histograms not found in smearing file"); + + //setup random engine + if (debug_) + std::cout<<"Setting up random engine with seed "<(seed); + + normal_ = std::make_shared>(0.,1.); + } double TrackSmearingTool::smearTrackP(const Track& track) { - - double p = track.getP(); - double nhits = track.getTrackerHitCount(); - bool isTop = track.getTanLambda() > 0. ? 
true : false; - int binN = smearing_histo_top_->FindBin(nhits); - - if (debug_) - std::cout<<"Track nhits="< smearing_histo_top_->GetXaxis()->GetNbins()) { - throw std::invalid_argument("Bin not found in smearing histogram"); - } - - double rel_smear = (*normal_)(*generator_); - double sp = 0.; - - if (isTop) - sp = rel_smear * smearing_histo_top_->GetBinContent(binN); - else - sp = rel_smear * smearing_histo_bot_->GetBinContent(binN); - - double psmear = 0.; - - if (relSmearing_) - psmear = p + sp*p; - else - psmear = p + sp; - - - if (debug_) { - std::cout<<"Track isTop: "< smearing_histo_top_->GetXaxis()->GetNbins()) { + throw std::invalid_argument("Bin not found in smearing histogram"); + } + + double rel_smear = (*normal_)(*generator_); + double sp = 0.; + + if (isTop) + sp = rel_smear * smearing_histo_top_->GetBinContent(binN); + else + sp = rel_smear * smearing_histo_bot_->GetBinContent(binN); + + double psmear = 0.; + + if (relSmearing_) + psmear = p + sp*p; + else + psmear = p + sp; + + + if (debug_) { + std::cout<<"Track isTop: "<GetSameParticlesFromVtx(vtx, ele, pos); } else @@ -448,17 +448,12 @@ bool NewVertexAnaProcessor::process(IEvent* ievent) { std::cout<updateWithSmearP(ele_trk); - double pos_smf = smearingTool_->updateWithSmearP(pos_trk); - double smeared_prod = ele_trk.getP()*pos_trk.getP(); - invm_smear = sqrt(smeared_prod/unsmeared_prod); + double ele_smf = smearingTool_->updateWithSmearP(ele_trk, ele); + double pos_smf = smearingTool_->updateWithSmearP(pos_trk, pos); smearingTool_->updateVertexWithSmearP(vtx, ele_smf, pos_smf); } - //std::cout << "[Before Preselection] ele track p after smearing: " << ele_trk.getP() << std::endl; //Add the momenta to the tracks - do not do that @@ -641,7 +636,6 @@ bool NewVertexAnaProcessor::process(IEvent* ievent) { _vtx_histos->Fill1DHisto("pos_track_n2dhits_h", pos2dHits, weight); _vtx_histos->Fill1DHisto("vtx_Psum_h", p_ele.P()+p_pos.P(), weight); _vtx_histos->Fill1DHisto("vtx_Esum_h", ele_E + pos_E, weight); - _vtx_histos->Fill1DHisto("vtx_smear_InvM_h", invm_smear*(vtx->getInvMass()), weight); _vtx_histos->Fill1DHisto("ele_pos_clusTimeDiff_h", (corr_eleClusterTime - corr_posClusterTime), weight); _vtx_histos->Fill2DHisto("ele_vtxZ_iso_hh", TMath::Min(ele_trk.getIsolation(0), ele_trk.getIsolation(1)), vtx->getZ(), weight); _vtx_histos->Fill2DHisto("pos_vtxZ_iso_hh", TMath::Min(pos_trk.getIsolation(0), pos_trk.getIsolation(1)), vtx->getZ(), weight); @@ -723,7 +717,7 @@ bool NewVertexAnaProcessor::process(IEvent* ievent) { Particle* ele = nullptr; Particle* pos = nullptr; - if(vtxColl_ == "UnconstrainedMollerVertices"){ + if(vtxColl_ == "UnconstrainedMollerVertices" || vtxColl_ == "BeamspotConstrainedMollerVertices"){ _ah->GetSameParticlesFromVtx(vtx, ele, pos); } else @@ -777,18 +771,6 @@ bool NewVertexAnaProcessor::process(IEvent* ievent) { biasingTool_->updateWithBiasP(pos_trk); } - double invm_smear = 1.; - //std::cout << "[Region loop Vtxs] ele track p before smearing: " << ele_trk.getP() << std::endl; - if (smearingTool_ and !isData_) { - double unsmeared_prod = ele_trk.getP()*pos_trk.getP(); - double ele_smf = smearingTool_->updateWithSmearP(ele_trk); - double pos_smf = smearingTool_->updateWithSmearP(pos_trk); - double smeared_prod = ele_trk.getP()*pos_trk.getP(); - invm_smear = sqrt(smeared_prod/unsmeared_prod); - smearingTool_->updateVertexWithSmearP(vtx, ele_smf, pos_smf); - } - //std::cout << "[Region loop vtxs] ele track p after smearing: " << ele_trk.getP() << std::endl; - //Add the momenta to the tracks 
//ele_trk.setMomentum(ele->getMomentum()[0],ele->getMomentum()[1],ele->getMomentum()[2]); //pos_trk.setMomentum(pos->getMomentum()[0],pos->getMomentum()[1],pos->getMomentum()[2]); @@ -1155,7 +1137,7 @@ bool NewVertexAnaProcessor::process(IEvent* ievent) { Particle* pos = nullptr; bool foundParts = false; - if(vtxColl_ == "UnconstrainedMollerVertices"){ + if(vtxColl_ == "UnconstrainedMollerVertices" || vtxColl_ == "BeamspotConstrainedMollerVertices"){ foundParts = _ah->GetSameParticlesFromVtx(vtx, ele, pos); } else @@ -1211,17 +1193,6 @@ bool NewVertexAnaProcessor::process(IEvent* ievent) { double invm_smear = 1.; - //std::cout << "[Good Vtxs] ele track p before smearing: " << ele_trk.getP() << std::endl; - if (smearingTool_ and !isData_) { - double unsmeared_prod = ele_trk.getP()*pos_trk.getP(); - double ele_smf = smearingTool_->updateWithSmearP(ele_trk); - double pos_smf = smearingTool_->updateWithSmearP(pos_trk); - double smeared_prod = ele_trk.getP()*pos_trk.getP(); - invm_smear = sqrt(smeared_prod/unsmeared_prod); - smearingTool_->updateVertexWithSmearP(vtx, ele_smf, pos_smf); - } - //std::cout << "[Good Vtxs] ele track p after smearing: " << ele_trk.getP() << std::endl; - //Get the layers hit on each track std::vector ele_hit_layers = ele_trk.getHitLayers(); int ele_Si0 = 0; diff --git a/utils/include/TrackSmearingTool.h b/utils/include/TrackSmearingTool.h index 95a416770..9940938a8 100644 --- a/utils/include/TrackSmearingTool.h +++ b/utils/include/TrackSmearingTool.h @@ -13,6 +13,7 @@ #include "Track.h" #include "Vertex.h" +#include "Particle.h" class TFile; class TH1D; @@ -28,6 +29,7 @@ class TrackSmearingTool { const std::string& tracks = "KalmanFullTracks"); double smearTrackP(const Track& trk); + double updateWithSmearP(Track& trk, Particle* part); double updateWithSmearP(Track& trk); void updateVertexWithSmearP(Vertex* vtx, double ele_smear_factor, double pos_smear_factor); diff --git a/utils/src/TrackSmearingTool.cxx b/utils/src/TrackSmearingTool.cxx index dfadd3a5f..fd5e29a59 100644 --- a/utils/src/TrackSmearingTool.cxx +++ b/utils/src/TrackSmearingTool.cxx @@ -75,7 +75,7 @@ double TrackSmearingTool::smearTrackP(const Track& track) { } -double TrackSmearingTool::updateWithSmearP(Track& trk) { +double TrackSmearingTool::updateWithSmearP(Track& trk, Particle* part) { double smeared_magnitude = smearTrackP(trk); // updated momentum by scaling each coordinate by smeared/unsmeared // this takes the direction of the unsmeared momentum and applies @@ -85,14 +85,30 @@ double TrackSmearingTool::updateWithSmearP(Track& trk) { for (double& coordinate : momentum) coordinate *= (smeared_magnitude/unsmeared_magnitude); trk.setMomentum(momentum); + part->setTrack(&trk); return (smeared_magnitude/unsmeared_magnitude); +} +double TrackSmearingTool::updateWithSmearP(Track& trk) { + double smeared_magnitude = smearTrackP(trk); + // updated momentum by scaling each coordinate by smeared/unsmeared + // this takes the direction of the unsmeared momentum and applies + // the smeared magnitude + std::vector momentum = trk.getMomentum(); + double unsmeared_magnitude = trk.getP(); + for (double& coordinate : momentum) + coordinate *= (smeared_magnitude/unsmeared_magnitude); + trk.setMomentum(momentum); + return (smeared_magnitude/unsmeared_magnitude); } void TrackSmearingTool::updateVertexWithSmearP(Vertex* vtx, double ele_smear_factor, double pos_smear_factor) { TVector3 p1_corr, p2_corr; double m_corr; + double p1_uncorr_p = vtx->getP1().Mag(); + double p2_uncorr_p = vtx->getP2().Mag(); + 
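+    // (the uncorrected momentum magnitudes cached above feed the corrected-mass line further
+    //  down: for ultra-relativistic tracks m^2 ~ 2*p1*p2*(1 - cos(theta)), so scaling the two
+    //  momenta by factors s1 and s2 rescales the invariant mass by sqrt(s1*s2))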
p1_corr.SetX(vtx->getP1X()*ele_smear_factor); p1_corr.SetY(vtx->getP1Y()*ele_smear_factor); p1_corr.SetZ(vtx->getP1Z()*ele_smear_factor); @@ -101,7 +117,6 @@ void TrackSmearingTool::updateVertexWithSmearP(Vertex* vtx, double ele_smear_fac p2_corr.SetY(vtx->getP2Y()*pos_smear_factor); p2_corr.SetZ(vtx->getP2Z()*pos_smear_factor); - m_corr = vtx->getInvMass() * sqrt((ele_smear_factor/p1_corr.Mag())*(pos_smear_factor/p2_corr.Mag())); - + m_corr = vtx->getInvMass() * sqrt((p1_corr.Mag()/p1_uncorr_p)*(p2_corr.Mag()/p2_uncorr_p)); vtx->setVtxParameters(p1_corr, p2_corr, m_corr); } From 688b2167a2c354db642d14479b7c167bed0c7e34 Mon Sep 17 00:00:00 2001 From: Alic <47487490+alspellm@users.noreply.github.com> Date: Thu, 20 Jun 2024 07:35:27 -0700 Subject: [PATCH 07/27] Update utils/src/TrackSmearingTool.cxx Co-authored-by: Tom Eichlersmith <31970302+tomeichlersmith@users.noreply.github.com> --- utils/src/TrackSmearingTool.cxx | 14 +++----------- 1 file changed, 3 insertions(+), 11 deletions(-) diff --git a/utils/src/TrackSmearingTool.cxx b/utils/src/TrackSmearingTool.cxx index fd5e29a59..c2396b10f 100644 --- a/utils/src/TrackSmearingTool.cxx +++ b/utils/src/TrackSmearingTool.cxx @@ -76,17 +76,9 @@ double TrackSmearingTool::smearTrackP(const Track& track) { } double TrackSmearingTool::updateWithSmearP(Track& trk, Particle* part) { - double smeared_magnitude = smearTrackP(trk); - // updated momentum by scaling each coordinate by smeared/unsmeared - // this takes the direction of the unsmeared momentum and applies - // the smeared magnitude - std::vector momentum = trk.getMomentum(); - double unsmeared_magnitude = trk.getP(); - for (double& coordinate : momentum) - coordinate *= (smeared_magnitude/unsmeared_magnitude); - trk.setMomentum(momentum); - part->setTrack(&trk); - return (smeared_magnitude/unsmeared_magnitude); + double smear_factor = updateWithSmearP(trk); + part->setTrack(&trk); // copy updated track into Particle + return smear_factor; } double TrackSmearingTool::updateWithSmearP(Track& trk) { From 5d34b6d3672aa32d1a7ecfec98f3e29f2d3bdae6 Mon Sep 17 00:00:00 2001 From: Alic <47487490+alspellm@users.noreply.github.com> Date: Thu, 20 Jun 2024 07:35:43 -0700 Subject: [PATCH 08/27] Update utils/src/TrackSmearingTool.cxx Co-authored-by: Tom Eichlersmith <31970302+tomeichlersmith@users.noreply.github.com> --- utils/src/TrackSmearingTool.cxx | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/utils/src/TrackSmearingTool.cxx b/utils/src/TrackSmearingTool.cxx index c2396b10f..334df244f 100644 --- a/utils/src/TrackSmearingTool.cxx +++ b/utils/src/TrackSmearingTool.cxx @@ -108,7 +108,8 @@ void TrackSmearingTool::updateVertexWithSmearP(Vertex* vtx, double ele_smear_fac p2_corr.SetX(vtx->getP2X()*pos_smear_factor); p2_corr.SetY(vtx->getP2Y()*pos_smear_factor); p2_corr.SetZ(vtx->getP2Z()*pos_smear_factor); - + // smear invariant mass as if it was a Moller (i.e. by sqrt(ele_smear*pos_smear)) + // using the corrected momenta directly to ensure correctness m_corr = vtx->getInvMass() * sqrt((p1_corr.Mag()/p1_uncorr_p)*(p2_corr.Mag()/p2_uncorr_p)); vtx->setVtxParameters(p1_corr, p2_corr, m_corr); } From e4baf277ce5507e45459675c14a93a2e14c18658 Mon Sep 17 00:00:00 2001 From: Alic Shen Spellman Date: Thu, 20 Jun 2024 14:37:45 -0700 Subject: [PATCH 09/27] add new nhits fee smearing script and values. 
Will need to adjust hpstr to read this --- .../fee_smearing/fee_smearing_nhits_2016.py | 157 ++++++++++++++++++ .../smearingFile_2016_all_20240620.root | Bin 0 -> 4556 bytes 2 files changed, 157 insertions(+) create mode 100644 utils/data/fee_smearing/fee_smearing_nhits_2016.py create mode 100644 utils/data/fee_smearing/smearingFile_2016_all_20240620.root diff --git a/utils/data/fee_smearing/fee_smearing_nhits_2016.py b/utils/data/fee_smearing/fee_smearing_nhits_2016.py new file mode 100644 index 000000000..09057f2a9 --- /dev/null +++ b/utils/data/fee_smearing/fee_smearing_nhits_2016.py @@ -0,0 +1,157 @@ +#!/usr/bin/env python +import os +import numpy as np +import math +import ROOT as r + +def gaus_fit(histo, xmin, xmax, smean, swidth, snorm, nsigma=2.0, isData=False): + + #initial fit with seeds + fitfunc = r.TF1("gaus","gaus") + fitfunc.SetParameter(0, snorm) + fitfunc.SetParameter(1, smean) + fitfunc.SetParameter(2, swidth) + fitRes = histo.Fit(fitfunc,"QLES","", xmin, xmax) + params = fitRes.Parameters() + chi2 = fitRes.Chi2() + ndf = fitRes.Ndf() + + #set first fist to be best fit + best_chi2 = chi2/ndf + best_params = params + + #iterate over randomly fluctuated fit parameters. Keep the best resulting fit + niters = 100 + for n in range(niters): + norm = params[0]*np.random.uniform(80,120)*0.01 + mu = params[1]*np.random.uniform(80,120)*0.01 + sigma = params[2]*np.random.uniform(80,120)*0.01 + + #Data has shoulders, so we can specify the xmin and xmax to do an asymmetric fit window + if isData: + xminx = mu - nsigma*sigma + xmaxx = mu + nsigma*sigma + if xminx < xmin: + xminx = xmin + if xmaxx > xmax: + xmaxx = xmax + fitfunc.SetParameter(0, norm) + fitfunc.SetParameter(1, mu) + fitfunc.SetParameter(2, sigma) + fitRes = histo.Fit(fitfunc,"QLES","", xminx, xmaxx) + #If fit fails, skip + try: + if fitRes.Parameters()[1] < xmin or fitRes.Parameters()[1] > xmax or fitRes.Ndf() < 1: + continue + except: + continue + + params = fitRes.Parameters() #these results seed the next iteration...maybe should only do if improved? + chi2 = fitRes.Chi2() + ndf = fitRes.Ndf() + #replace best fit + if chi2/ndf < best_chi2: + best_params = params + + #Do the final fit using the best parameters found + fitfunc.SetParameter(0, best_params[0]) + fitfunc.SetParameter(1, best_params[1]) + fitfunc.SetParameter(2, best_params[2]) + xminx = best_params[1] - nsigma*best_params[2] + xmaxx = best_params[1] + nsigma*best_params[2] + + #again, if data, use asymmetric fit window to avoid the left shoulder + if isData: + if xminx < xmin: + xminx = xmin + if xmaxx > xmax: + xmaxx = xmax + + fitRes = histo.Fit(fitfunc,"QLES","", xminx, xmaxx) + params = fitRes.Parameters() + errors = fitRes.Errors() + chi2 = fitRes.Chi2() + ndf = fitRes.Ndf() + return histo, params, errors, chi2/ndf + +#Load Data Run 7800 FEEs +data_results = {} +infilename = '/sdf/group/hps/user-data/alspellm/2016/fee_smearing/run7800/hadd/hadd_fee_2pt3_recon_fee_histos.root' #FEE skimmed track ana + +#Read track hit histograms +histoname = 'KalmanFullTracks/KalmanFullTracks_p_vs_nHits_top_hh' #top +infile = r.TFile(f'{infilename}',"READ") +top_h = copy.deepcopy(infile.Get(f'{histoname}')) +histoname = 'KalmanFullTracks/KalmanFullTracks_p_vs_nHits_bot_hh' #bot +bot_h = copy.deepcopy(infile.Get(f'{histoname}')) +infile.Close() + +#Change the names to use as keys +top_h.SetName('top') +bot_h.SetName('bot') + +#Fit the FEE peak for each category of nhits. 
Just have access to 10, 11, 12 for now +for h in [top_h, bot_h]: + histo = h + for nhits in [10, 11, 12]: + #Get the nhits momentum projection + proj = histo.ProjectionY(f'proj_{h.GetName()}_{nhits}hits', histo.GetXaxis().FindBin(nhits), histo.GetXaxis().FindBin(nhits),"") + #Fit the data + _, params, errors, chi2ndf = gaus_fit(proj, 2.0, 2.5, 2.4, 0.47, 12000, nsigma=1.5, isData=True) + + #store the results [mu,sigma] for top/bot nhits= + data_results[f'{h.GetName()}_nhits_{nhits}'] = [params[1], params[2]] + + +################################################################################## +#Read in MC FEE samples...Yes I could have combined these, but I separated them in development. Feel free to improve :) +mc_results = {} +infilename= '/sdf/group/hps/user-data/alspellm/2016/fee_smearing/tritrig/hadd/hadd_fee_2pt3_recon_tritrig_histos.root' + +#Read track hit histograms +histoname = 'KalmanFullTracks/KalmanFullTracks_p_vs_nHits_top_hh' #top +infile = r.TFile(f'{infilename}',"READ") +top_h = copy.deepcopy(infile.Get(f'{histoname}')) +histoname = 'KalmanFullTracks/KalmanFullTracks_p_vs_nHits_bot_hh' #bot +bot_h = copy.deepcopy(infile.Get(f'{histoname}')) +infile.Close() + +#Change the names to use as keys +top_h.SetName('top') +bot_h.SetName('bot') + +for h in [top_h, bot_h]: + histo = h + for nhits in [10, 11, 12]: + print(nhits) + proj = histo.ProjectionY(f'proj_{h.GetName()}_{nhits}hits', histo.GetXaxis().FindBin(nhits), histo.GetXaxis().FindBin(nhits),"") + _, params, errors, chi2ndf = gaus_fit(proj, 2.1, 2.5, 2.2, 0.1, proj.GetMaximum(), nsigma=1.5) + mc_results[f'{h.GetName()}_nhits_{nhits}'] = [params[1], params[2]] + +#Create output file to save results +outfile = r.TFile('smearingFile_2016_nhits.root',"RECREATE") +outfile.cd() +smtop_h = r.TH1F('KalmanFullTracks_p_vs_nHits_hh_smearing_rel_top','p_vs_nHits_smearing_rel_top;nhits;smear factor', 3, 9.5, 12.5) +smbot_h = r.TH1F('KalmanFullTracks_p_vs_nHits_hh_smearing_rel_bot','p_vs_nHits_smearing_rel_bot;nhits;smear factor', 3, 9.5, 12.5) + +#Calculate smearing factor according to 2016 Bump Hunt +smear_fac = lambda mu_data, sig_data, mu_mc, sig_mc : np.sqrt(np.square(sig_data/mu_data) - np.square(sig_mc/mu_mc)) +for key, vals in data_results.items(): + istop = False + if 'top' in key: + istop = True + nhits = float(key.split('_')[2]) + mu_data = vals[0] + sig_data = vals[1] + mu_mc = mc_results[key][0] + sig_mc = mc_results[key][1] + sf = smear_fac(mu_data, sig_data, mu_mc, sig_mc) + print(f'{key} sf={sf}') + + #save results + if istop: + smtop_h.SetBinContent(smtop_h.GetXaxis().FindBin(nhits), sf) + else: + smbot_h.SetBinContent(smbot_h.GetXaxis().FindBin(nhits), sf) + +outfile.Write() diff --git a/utils/data/fee_smearing/smearingFile_2016_all_20240620.root b/utils/data/fee_smearing/smearingFile_2016_all_20240620.root new file mode 100644 index 0000000000000000000000000000000000000000..81fc9c57d728225c3a978c409cbf50b258853a02 GIT binary patch literal 4556 zcmcgwcTiJNmrv*rIwHME=q2Dfw36+(Tm6el`lb1$a#P}~w0+3&L z`0E7#AtZo6pf3z2?gI0N z!4eP)-Z0o>7>rDZALb1EI&x@A=riy0XYNhd=dLnM-i_xdEol85^qLxxOD zBnJpLQy@lz@sNH9Mn6qIn>26m9ga+i$CZne3p0p2W$yb*l8Bm8mWc#Zl9K8&k+NYj zw3^98?NhB&Z3ubUzy=P{@-AE#4)>d(O6NoHj-T{Xd6%K-P>pe~h{z~lH^qRfEI`Wp z7DbxYLJ{?IpHzBU+Dw?;@@-@_+2;Y$UkN(4jwr)$rdtA@AJ~el>FM<`F~9h$V^!_`fQ<$fE-hQ_jlj|FVb4t?mJPKO7ciYS-Egmf?m+a$i1wYf!~j@Eg4Qz)_bccPIcZvldF(5sbyFO+}k>KW>~l5sxDbMzw4<67R%Q6%KSu2T*XAilpupZ*8Y ziL@h9=-(hZaXJan(Ek?~{~wd;ZwVz<7%=+p_Gn#Hn2W-Cu|39azGzwtG|CC?;e;~u 
zboOG_^YU{<2Js`^Jl&lT#E0)iQucuWXfUyCOxVtOao=UVruJ((e(S1=PnwcPf9bxU zPuhEbQY#DF_sRDa6_P50XRglY&J|$Ga|?W&o$t5tp`o`V1a>B9EJsDU`EXpB7)CNt z2L7FBMguBv-~xG6;5v)>QGt9wul&=4;4KPpjo<3fY=>{eVd$*yuU|U%+ z636hKswCw}%r_s3^w#loj9KiJk47n)=aW(t)O7ZZWcT3?grdS_Ko01fp;R)H7OR;Q zhu%irD|tE{Tz>eE!%xT}4bj!wF(RYVn5a!q^ou%`Fw_1djSvIUo>MJ59Dpgtg;S4h zH1A4H$NE0w-Smegx%k~yN_^)-{}L-BipX3sujL$}c1X2UVuiY#436n)a=DgX!&uO0 zfYpZWgo9 z)9*Jv|0v613zFhfO6xhdzT7E7{?444@#AYY+20zpD=`tiFJfLh;-qM;vtmSaBGe1g zVX8)ZzmhDXR^chFQ4x@58%D zY34n^Tn9YQ*Plt6*jRr|*K(k_85pCy*?33bVEeo9=oN}tos(-a#h!w)&X+j=?V<^x zDnijk_Z<}uk81P9y)D}_CP+>-W2UQ@Y4?YqU_7{v>^oL{s{Q0u^=WW5GLF_I#GlUj zYV>-aC?9{W6;F~D!xOp>F4d`htFe79(1ciO|BSM)zpFN9_C3$n<#R@GzvbD7yK<)u zR6Z#%+3cNbU6pd?@D4PsuwkU%=7Fd$=)NCeH90YVk`;|HL1eS|=7}+zFxbxbT#h3! z*v4`#RsAr?bn#63t;OZjLXXEqH~E>gDO?j#Fy64z)a^C8#nCx2*^v?8VTo>X)Wxo)yDhrKa>9O~RfnORl5 z3`GsLKAZ(=cE!F;+l1k1infXAqmXEz?v|@qLNc2*Qeq9M3)Ysu67N?ILV6rW=*9fz zCAjA39V#Z`6NqnZ+65^ZqYy-Xgp;R>>T|gWcF0`{SO>QJXWVnH``W3 zEfUz?tSq~5d75?OU88W`Ir^$#spqLRwJ;}T9+#L1Rc7fF`fg)NHT}-mZ6VEdcsrU# z@h+MX^GlJTEt;ASn<@#{eJ#>h(bm0fBOAJc(rB9Dt(;!_5{m$=x9Y#5!mh?lVI{HA z-6tXFu=Ie~u{&i_vl5&&lHMSaY&CLuE+4(hyDkbHb~D8tHI1smcSX*L@eZWBUN3RhHYx^%()`8eO|osx1^q1wx(|^f4p{B!IkCcMgz&!%cP)_52ei6 zBk?`!r1RB=Nrzs^`P9HDD5qsn$5S{!>Qe2E|Ni^AF3Zx=()}oP?ko?RCLJrA zI-O>vsgh?M?6qWc4x?k^0&YpCFP4(}--u=C17 zzkfeZmse!RO}+ppBaa=D}uu^@&eZLwy;`dhxPCMNCF z?s#a&`+G|XThh`!gb^vN>veQodY)tj&np^Fwk-+P&>RF{eH*}UZzMVC>+V!}vZl|p7Jj36snsBE?S9;ps zo6C}_`%dmdP%V&($8s5bGUHGG#iHvuFrmF_&p7GKgX$8S<%~ebTN6mkRX65z* z5!7zkFxiGk2_EC6OB=!pWz?OAGutOe02i@#6yqNXj^-Mw^?I9+l=+`qAvPInqxa1= zm2>v;DIIxL1`8(yC^;ZbM^;uT-!VM(km9<7>Dqn6ST}+me|5k?kleNc1k_(J|8NLB z=kp!Yu_O@ox*=rt#+)kM&*dC`>TX(wdn{|<;^Ae+6UEu6#9T%DJCE1#idI=xaNtq} zPyCjd=_6U*?9$i{SC-EvgHg);xUoEM{s49c!P2#1!KA>ew9i=!4KE3(#;4jYkO?Bb z-tmr|g0nxWy7IKuLJ)8*Xjj4ceHCyRw0(GB{zZt?44Pi5UO15ekNcE*>>-k+Iwqi* zgHm@-Js`bWv=Kf*pF(MJn9RbzF5YH4_YC2uDpTP;KNQ;y4kM2@oZO4&SN54$>*9#W zWem-;JQbu66c;;dQWS3iky;;}dlfVL>jdmv_w%2Hkn`2E8r_sXTUSiony3qlr&ZU0 z)D4z~ALsjiuV8o`qH${Au$JaO$fnp8duBRnQ_*sEFYt7rM%u_|$VWGa1ptgwzTKPD z@WWOBB$nK(C}&&@(lM4^fs4BPLBnzt8>nNs*MHT1%jk1#Z1w>WUXHp|sOoAPoi4^V zh6BMK%k~uyTI*U;E?3#COt}ucA;77sOx|={wz645_%gTbm9OQK*Z)#bZPLi#;_MN^ z>rn}r546OE?P3uqC}N5GwGQ3atYIPDqJFKpTH5k?m%R1fH;SU1>1$TNFeP_<>~>70 z^DvmcTW^8yhU?hA$O2ZNBNB-XMCG?6InL;ibS<{m@U*giVQjlC^yT_Y(|*=NoP0dz zaSoJ-!t*L2Hc5s7Tj$S}34FO&#}l~rjUS{uH!?ed&6BSAggWm2W-+UX?@v!DP*EM~ z9lXO0DAx|@WZ18&-1#hd?O0$#F25Xpwua9L-lbx1p9s>WFX1bCFEcr2z{9=28ZC}1 z{$o;Mdu#c1&hOT7#WH`M+UBp`x~2~u3-c-7l=psiOlyjliRy^x3go8h=yca$q|g?% z00qU{^bfcTe~?hYTOF2hy^|*vNUnhfv*L*z*>hPxJpm-|o`}zqS*_Xl)+8C56P=B; zW}X8+5|1|A^)0lwaXxj!FNJpWD5N7fNw;E!=-%@rWlyB!_j4HU3-%k8N4~_dm#Ts7 z1pN#mbXAdFmo@)LzkQroPgmq$t~>ODRtPLaoops#V!GExXv=dzRPJ1liFZ8o%Yn>v zYn$I5FV>;B1xXa^zut*768|(jedgG%kf%`0zGkV2ShSI76>+zaLP#V;&=~|Ow+`t{ zc7k==gxXE6D`6I7DFc*0H=9U;jbk#MK z-CyiBV7+))V%y0h(>1!QrEBcC7@5;OCq`do1_|A;IsTwF|4hJIaO9NfeHZt~Yqo>W zkuIkIp&~oiN;&nNI@HZosuf%}mg7y?#t{-*%54TAehZ;M*@EoYbCbksmXp%+C)KfBnt=_DeMX=$HN;zx-SM60wV5 KA_ORA0R96fhyvFD literal 0 HcmV?d00001 From 85c37eb001dd102a720916e22a89d119be8998f8 Mon Sep 17 00:00:00 2001 From: Alic Shen Spellman Date: Thu, 20 Jun 2024 15:34:16 -0700 Subject: [PATCH 10/27] confirmed that data and smeared MC moller peaks match! 
Adding in other cleaning since I was at it...even though its messy, sorry --- .../track_bias_corrections_data_2016.json | 4 + .../track_bias_corrections_tritrig_2016.json | 5 + .../v0_projection_2016_mc_7800_config.json | 10 - .../data/v0_projection_2016_mc_config.json | 10 + .../v0_projection_2016_mc_signal_config.json | 10 - .../vtxAnalysis_2016_simp_reach_light.json | 324 ++++++++++++++++++ .../simps/radMatchTight_L1L1_nvtx1.json | 17 + .../simps/radMatchTight_nocuts.json | 7 + processors/config/anaSimps_2016_cfg.py | 16 +- utils/src/TrackSmearingTool.cxx | 4 +- 10 files changed, 378 insertions(+), 29 deletions(-) create mode 100644 analysis/data/track_bias_corrections_data_2016.json create mode 100644 analysis/data/track_bias_corrections_tritrig_2016.json delete mode 100644 analysis/data/v0_projection_2016_mc_7800_config.json create mode 100644 analysis/data/v0_projection_2016_mc_config.json delete mode 100644 analysis/data/v0_projection_2016_mc_signal_config.json create mode 100644 analysis/plotconfigs/tracking/simps/vtxAnalysis_2016_simp_reach_light.json create mode 100644 analysis/selections/simps/radMatchTight_L1L1_nvtx1.json create mode 100644 analysis/selections/simps/radMatchTight_nocuts.json diff --git a/analysis/data/track_bias_corrections_data_2016.json b/analysis/data/track_bias_corrections_data_2016.json new file mode 100644 index 000000000..3f52f0ca3 --- /dev/null +++ b/analysis/data/track_bias_corrections_data_2016.json @@ -0,0 +1,4 @@ +{ + "track_time": -1.5 +} + diff --git a/analysis/data/track_bias_corrections_tritrig_2016.json b/analysis/data/track_bias_corrections_tritrig_2016.json new file mode 100644 index 000000000..cda3a44b4 --- /dev/null +++ b/analysis/data/track_bias_corrections_tritrig_2016.json @@ -0,0 +1,5 @@ +{ + "track_time": -5.5, + "track_z0": -0.06 +} + diff --git a/analysis/data/v0_projection_2016_mc_7800_config.json b/analysis/data/v0_projection_2016_mc_7800_config.json deleted file mode 100644 index cd6f5875c..000000000 --- a/analysis/data/v0_projection_2016_mc_7800_config.json +++ /dev/null @@ -1,10 +0,0 @@ -{ - "7800": { - "target_position": -4.3, - "rotated_mean_x": -2.31164e-01, - "rotated_mean_y": -5.15632e-02, - "rotated_sigma_x": 2.01293e-01, - "rotated_sigma_y": 8.16385e-02, - "rotation_angle_mrad": -120.719 - } -} diff --git a/analysis/data/v0_projection_2016_mc_config.json b/analysis/data/v0_projection_2016_mc_config.json new file mode 100644 index 000000000..fc18e19aa --- /dev/null +++ b/analysis/data/v0_projection_2016_mc_config.json @@ -0,0 +1,10 @@ +{ + "7984": { + "target_position": -4.3, + "rotated_mean_x": -0.23135574671453285, + "rotated_mean_y": -0.02398086113913096, + "rotated_sigma_x": 0.2109738212614317, + "rotated_sigma_y": 0.08129743797473131, + "rotation_angle_mrad": -103.56647336269294 + } +} diff --git a/analysis/data/v0_projection_2016_mc_signal_config.json b/analysis/data/v0_projection_2016_mc_signal_config.json deleted file mode 100644 index 32f1a431a..000000000 --- a/analysis/data/v0_projection_2016_mc_signal_config.json +++ /dev/null @@ -1,10 +0,0 @@ -{ - "7800": { - "target_position": -4.3, - "rotated_mean_x": 0.00032589410352742634, - "rotated_mean_y": 0.0005273697881972964, - "rotated_sigma_x": 0.19701452441142028, - "rotated_sigma_y": 0.09424252765235556, - "rotation_angle_mrad": -171.21036357655905 - } -} diff --git a/analysis/plotconfigs/tracking/simps/vtxAnalysis_2016_simp_reach_light.json b/analysis/plotconfigs/tracking/simps/vtxAnalysis_2016_simp_reach_light.json new file mode 100644 index 000000000..9b477afd9 
--- /dev/null +++ b/analysis/plotconfigs/tracking/simps/vtxAnalysis_2016_simp_reach_light.json @@ -0,0 +1,324 @@ +{ + "ele_track_cluster_EoverP_h": { + "bins" : 1000, + "minX" : 0.0, + "maxX" : 10.0, + "xtitle" : "e^{-} Track Cluster E/P", + "ytitle" : "Events" + }, + "pos_track_cluster_EoverP_h": { + "bins" : 1000, + "minX" : 0.0, + "maxX" : 10.0, + "xtitle" : "e^{+} Track Cluster E/P", + "ytitle" : "Events" + }, + "corr_eleClus_t_h": { + "bins" : 600, + "minX" : -30.0, + "maxX" : 30.0, + "xtitle" : "e^{-}_{clus} corr time [ns]", + "ytitle" : "Events" + }, + "corr_posClus_t_h": { + "bins" : 600, + "minX" : -30.0, + "maxX" : 30.0, + "xtitle" : "e^{+}_{clus} corr time [ns]", + "ytitle" : "Events" + }, + "ele_hitlayers_h": { + "bins" : 12, + "minX" : -0.5, + "maxX": 11.5, + "xtitle" : "hit layer", + "ytitle" : "nHits on Track" + }, + "pos_hitlayers_h": { + "bins" : 12, + "minX" : -0.5, + "maxX": 11.5, + "xtitle" : "hit layer", + "ytitle" : "nHits on Track" + }, + "ele_pos_clusTimeDiff_h": { + "bins" : 320, + "minX" : -16, + "maxX" : 16, + "xtitle" : "#Delta_{t}(e^{-},e^{+})_{clus} [ns]", + "ytitle" : "Events" + }, + "ele_track_clus_dt_h": { + "bins" : 200, + "minX" : -20, + "maxX" : 20, + "xtitle" : "e^{-} Track Cluster #Delta_{t} [ns]", + "ytitle" : "Events" + }, + "pos_track_clus_dt_h": { + "bins" : 200, + "minX" : -20, + "maxX" : 20, + "xtitle" : "e^{+} Track Cluster #Delta_{t} [ns]", + "ytitle" : "Events" + }, + "ele_d0_h" : { + "bins" : 200, + "minX" : -10, + "maxX" : 10, + "xtitle" : "d_{0} [mm]", + "ytitle" : "Tracks" + }, + "ele_Phi_h" : { + "bins" : 100, + "minX" : -0.4, + "maxX" : 0.4, + "xtitle" : "#phi_{0}", + "ytitle" : "Tracks" + }, + "ele_Omega_h" : { + "bins" : 100, + "minX" : -0.001, + "maxX" : 0.001, + "xtitle" : "#omega", + "ytitle" : "Tracks" + }, + "ele_TanLambda_h" : { + "bins" : 200, + "minX" : -0.2, + "maxX" : 0.2, + "xtitle" : "tan(#lambda)", + "ytitle" : "Tracks" + }, + "ele_Z0_h" : { + "bins" : 200, + "minX" : -5, + "maxX" : 5, + "xtitle" : "z_{0} [mm]", + "ytitle" : "Tracks" + }, + "ele_time_h" : { + "bins" : 200, + "minX" : -10, + "maxX" : 10, + "xtitle" : "track time [ns]", + "ytitle" : "Tracks" + }, + "ele_chi2ndf_h" : { + "bins" : 200, + "minX" : 0, + "maxX" : 30, + "xtitle" : "ele track #chi^{2} / ndf", + "ytitle" : "Tracks" + }, + "ele_chi2_h" : { + "bins" : 200, + "minX" : 0, + "maxX" : 20, + "xtitle" : "track #chi^{2}", + "ytitle" : "Tracks" + }, + "ele_p_h" : { + "bins" : 250, + "minX" : 0, + "maxX" : 2.5, + "xtitle" : "p_{e^{-}} [GeV]", + "ytitle" : "Tracks" + }, + "pos_d0_h" : { + "bins" : 200, + "minX" : -10, + "maxX" : 10, + "xtitle" : "d_{0} [mm]", + "ytitle" : "Tracks" + }, + "pos_Phi_h" : { + "bins" : 100, + "minX" : -0.4, + "maxX" : 0.4, + "xtitle" : "#phi_{0}", + "ytitle" : "Tracks" + }, + "pos_Omega_h" : { + "bins" : 100, + "minX" : -0.001, + "maxX" : 0.001, + "xtitle" : "#omega", + "ytitle" : "Tracks" + }, + "pos_TanLambda_h" : { + "bins" : 200, + "minX" : -0.2, + "maxX" : 0.2, + "xtitle" : "tan(#lambda)", + "ytitle" : "Tracks" + }, + "pos_Z0_h" : { + "bins" : 200, + "minX" : -5, + "maxX" : 5, + "xtitle" : "z_{0} [mm]", + "ytitle" : "Tracks" + }, + "pos_time_h" : { + "bins" : 200, + "minX" : -10, + "maxX" : 10, + "xtitle" : "track time [ns]", + "ytitle" : "Tracks" + }, + "pos_chi2ndf_h" : { + "bins" : 200, + "minX" : 0, + "maxX" : 30, + "xtitle" : "pos track #chi^{2} / ndf", + "ytitle" : "Tracks" + }, + "pos_chi2_h" : { + "bins" : 200, + "minX" : 0, + "maxX" : 20, + "xtitle" : "track #chi^{2}", + "ytitle" : "Tracks" + }, + "pos_p_h" : { + 
"bins" : 250, + "minX" : 0, + "maxX" : 2.5, + "xtitle" : "p_{e^{+}} [GeV]", + "ytitle" : "Tracks" + }, + "vtx_chi2_h" : { + "bins" : 200, + "minX" : 0, + "maxX" : 30, + "xtitle" : "vtx #chi^{2}", + "ytitle" : "Vertices" + }, + "vtx_X_svt_h" : { + "bins" : 200, + "minX" : -5, + "maxX" : 5, + "xtitle" : "vtx X pos [mm]", + "ytitle" : "Vertices" + }, + "vtx_Y_svt_h" : { + "bins" : 200, + "minX" : -5, + "maxX" : 5, + "xtitle" : "vtx Y pos [mm]", + "ytitle" : "Vertices" + }, + "vtx_Z_svt_h" : { + "bins" : 200, + "minX" : -50, + "maxX" : 50, + "xtitle" : "vtx Z pos [mm]", + "ytitle" : "Vertices" + }, + "vtx_sigma_X_h" : { + "bins" : 100, + "minX" : 0, + "maxX" : 5, + "xtitle" : "vtx #sigma_{x} [mm]", + "ytitle" : "Vertices" + }, + "vtx_sigma_Y_h" : { + "bins" : 100, + "minX" : 0, + "maxX" : 5, + "xtitle" : "vtx #sigma_{y} [mm]", + "ytitle" : "Vertices" + }, + "vtx_sigma_Z_h" : { + "bins" : 100, + "minX" : 0, + "maxX" : 5, + "xtitle" : "vtx #sigma_{z} [mm]", + "ytitle" : "Vertices" + }, + "vtx_InvM_h" : { + "bins" : 200, + "minX" : 0, + "maxX" : 0.2, + "xtitle" : "vtx Mass [GeV]", + "ytitle" : "Vertices" + }, + "vtx_InvMErr_Z_h" : { + "bins" : 100, + "minX" : 0, + "maxX" : 0.05, + "xtitle" : "vtx #sigma_{z} [mm]", + "ytitle" : "Vertices" + }, + "vtx_px_h" : { + "bins" : 300, + "minX" : -1.5, + "maxX" : 1.5, + "xtitle" : "vtx p_{x} [GeV]", + "ytitle" : "Vertices" + }, + "vtx_py_h" : { + "bins" : 300, + "minX" : -1.5, + "maxX" : 1.5, + "xtitle" : "vtx p_{y} [GeV]", + "ytitle" : "Vertices" + }, + "vtx_pz_h" : { + "bins" : 350, + "minX" : 0, + "maxX" : 3.5, + "xtitle" : "vtx p_{z} [GeV]", + "ytitle" : "Vertices" + }, + "vtx_p_h" : { + "bins" : 350, + "minX" : 0, + "maxX" : 3.5, + "xtitle" : "vtx p [GeV]", + "ytitle" : "Vertices" + }, + "vtx_Psum_h" : { + "bins" : 350, + "minX" : 0, + "maxX" : 3.5, + "xtitle" : "vtx p_{sum} [GeV]", + "ytitle" : "Vertices" + }, + "vtx_Esum_h" : { + "bins" : 350, + "minX" : 0, + "maxX" : 3.5, + "xtitle" : "vtx E_{sum} [GeV]", + "ytitle" : "Vertices" + }, + "Esum_h" : { + "bins" : 350, + "minX" : 0, + "maxX" : 3.5, + "xtitle" : "E_{e^{-}} + E_{e^{+}} [GeV]", + "ytitle" : "Events" + }, + "Psum_h" : { + "bins" : 350, + "minX" : 0, + "maxX" : 3.5, + "xtitle" : "p_{e^{-}} + p_{e^{+}} [GeV]", + "ytitle" : "Events" + }, + "hitCode_h" : { + "bins" : 16, + "minX" : -0.5, + "maxX" : 15.5, + "xtitle" : "hit code", + "ytitle" : "Tracks" + }, + "vtx_proj_significance_h": { + "bins" : 200, + "minX" : 0.0, + "maxX" : 10.0, + "xtitle" : "vtx proj significance N#sigma", + "ytitle" : "Events" + } +} diff --git a/analysis/selections/simps/radMatchTight_L1L1_nvtx1.json b/analysis/selections/simps/radMatchTight_L1L1_nvtx1.json new file mode 100644 index 000000000..add042862 --- /dev/null +++ b/analysis/selections/simps/radMatchTight_L1L1_nvtx1.json @@ -0,0 +1,17 @@ +{ + "L1Requirement_eq": { + "cut": 1, + "id": 0, + "info": "L1L1" + }, + "isRadEle_eq" : { + "cut" : 1, + "id" : 1, + "info" : "isRadEle" + }, + "nVtxs_eq": { + "cut": 1, + "id": 2, + "info": "N_{vtx}=1" + } +} diff --git a/analysis/selections/simps/radMatchTight_nocuts.json b/analysis/selections/simps/radMatchTight_nocuts.json new file mode 100644 index 000000000..2989e5ddd --- /dev/null +++ b/analysis/selections/simps/radMatchTight_nocuts.json @@ -0,0 +1,7 @@ +{ + "isRadEle_eq" : { + "cut" : 1, + "id" : 0, + "info" : "isRadEle" + } +} diff --git a/processors/config/anaSimps_2016_cfg.py b/processors/config/anaSimps_2016_cfg.py index 6d3f2ce1c..c8a55823d 100644 --- a/processors/config/anaSimps_2016_cfg.py +++ 
b/processors/config/anaSimps_2016_cfg.py @@ -58,14 +58,13 @@ vtxana.parameters["beamPosCfg"] = "" if options.isData and options.year == 2016: vtxana.parameters["v0ProjectionFitsCfg"] = os.environ['HPSTR_BASE']+'/analysis/data/v0_projection_2016_config.json' - vtxana.parameters["trackBiasCfg"] = os.environ['HPSTR_BASE']+'/utils/data/track_bias_corrections_data_2016.json' + vtxana.parameters["trackBiasCfg"] = os.environ['HPSTR_BASE']+'/analysis/data/track_bias_corrections_data_2016.json' elif not options.isData and options.year == 2016: print('Running MC') - vtxana.parameters["trackBiasCfg"] = os.environ['HPSTR_BASE']+'/utils/data/track_bias_corrections_tritrig_2016.json' - #vtxana.parameters["pSmearingFile"] = os.environ['HPSTR_BASE']+'/utils/data/smearingFile_2016_all_12112023.root' - #vtxana.parameters["pSmearingSeed"] = options.pSmearingSeed - vtxana.parameters["v0ProjectionFitsCfg"] = os.environ['HPSTR_BASE']+'/utils/data/vertex_proj_beamspot_tritrig_2016.json' #For tritrig and wab mc - #vtxana.parameters["v0ProjectionFitsCfg"] = os.environ['HPSTR_BASE']+'/analysis/data/v0_projection_2016_mc_signal_config.json' #For signal (accidentally gen with bspt=(0,0) THIS NEEDS TO CHANGE AS OF 04/29/24. New samples have different beamspots + vtxana.parameters["trackBiasCfg"] = os.environ['HPSTR_BASE']+'/analysis/data/track_bias_corrections_tritrig_2016.json' + vtxana.parameters["pSmearingFile"] = os.environ['HPSTR_BASE']+'utils/data/fee_smearing/smearingFile_2016_all_20240620.root' + vtxana.parameters["pSmearingSeed"] = options.pSmearingSeed + vtxana.parameters["v0ProjectionFitsCfg"] = os.environ['HPSTR_BASE']+'/analysis/data/v0_projection_2016_mc_config.json' CalTimeOffset = -999 @@ -89,7 +88,10 @@ RegionPath+'Tight_L1L1_nvtx1.json'] if(options.isData != 1): RegionDefinitions.extend([RegionPath+'radMatchTight_2016_simp_reach_CR.json', - RegionPath+'radMatchTight_2016_simp_SR_analysis.json']) + RegionPath+'radMatchTight_2016_simp_SR_analysis.json', + RegionPath+'radMatchTight_nocuts.json', + RegionPath+'radMatchTight_L1L1_nvtx1.json'] + ) vtxana.parameters["regionDefinitions"] = RegionDefinitions diff --git a/utils/src/TrackSmearingTool.cxx b/utils/src/TrackSmearingTool.cxx index fd5e29a59..5cf0b5055 100644 --- a/utils/src/TrackSmearingTool.cxx +++ b/utils/src/TrackSmearingTool.cxx @@ -18,8 +18,8 @@ TrackSmearingTool::TrackSmearingTool(const std::string& smearingfile, throw std::invalid_argument( "Provided input smearing file does not exists"); //cache the smearing histograms - smearing_histo_top_ = (TH1D*) smearingfile_->Get((tracks+"_p_vs_nHits_hh_smearing"+hsuffix).c_str()); - smearing_histo_bot_ = (TH1D*) smearingfile_->Get((tracks+"_p_vs_nHits_hh_smearing"+hsuffix).c_str()); + smearing_histo_top_ = (TH1D*) smearingfile_->Get((tracks+"_p_vs_nHits_hh_smearing"+hsuffix+"_top").c_str()); + smearing_histo_bot_ = (TH1D*) smearingfile_->Get((tracks+"_p_vs_nHits_hh_smearing"+hsuffix+"_bot").c_str()); if (!smearing_histo_top_ || !smearing_histo_bot_) throw std::invalid_argument("Top and Bottom smearing histograms not found in smearing file"); From 5b1e868b7fbfde1d6695b0d7253e9db16b19130e Mon Sep 17 00:00:00 2001 From: Alic Shen Spellman Date: Thu, 20 Jun 2024 17:03:50 -0700 Subject: [PATCH 11/27] when I added ele->setTrack in TrackSmearingTool, all changes to the track persist. Since there are some small track bias corrections in the ana processor, these were being applied in preselection, the track was permantently updated, then they were applied again at the other selection stages. 
So I moved this ele->setTrack outside of TrackSmearingTool, and removed the duplicate track bias corrections --- processors/src/NewVertexAnaProcessor.cxx | 40 ++++-------------------- utils/include/TrackSmearingTool.h | 1 - utils/src/TrackSmearingTool.cxx | 14 --------- 3 files changed, 6 insertions(+), 49 deletions(-) diff --git a/processors/src/NewVertexAnaProcessor.cxx b/processors/src/NewVertexAnaProcessor.cxx index 99f31bdee..70121bb23 100644 --- a/processors/src/NewVertexAnaProcessor.cxx +++ b/processors/src/NewVertexAnaProcessor.cxx @@ -420,11 +420,6 @@ bool NewVertexAnaProcessor::process(IEvent* ievent) { ele_trk.applyCorrection(pair.first, pair.second); pos_trk.applyCorrection(pair.first, pair.second); } - //ele_trk.applyCorrection("z0", beamPosCorrections_.at(1)); - //pos_trk.applyCorrection("z0", beamPosCorrections_.at(1)); - // Track Time Corrections - //ele_trk.applyCorrection("track_time",eleTrackTimeBias_); - //pos_trk.applyCorrection("track_time", posTrackTimeBias_); // Correct for the momentum bias @@ -450,11 +445,15 @@ bool NewVertexAnaProcessor::process(IEvent* ievent) { if (smearingTool_ and !isData_) { double unsmeared_prod = ele_trk.getP()*pos_trk.getP(); - double ele_smf = smearingTool_->updateWithSmearP(ele_trk, ele); - double pos_smf = smearingTool_->updateWithSmearP(pos_trk, pos); + double ele_smf = smearingTool_->updateWithSmearP(ele_trk); + double pos_smf = smearingTool_->updateWithSmearP(pos_trk); smearingTool_->updateVertexWithSmearP(vtx, ele_smf, pos_smf); } + //After all modifications to the track are made, update the memory address that the particle gets the track from + //with the modified track, that way changes here persist throughout this processor + ele->setTrack(&ele_trk); + pos->setTrack(&pos_trk); //Add the momenta to the tracks - do not do that //ele_trk.setMomentum(ele->getMomentum()[0],ele->getMomentum()[1],ele->getMomentum()[2]); @@ -745,20 +744,6 @@ bool NewVertexAnaProcessor::process(IEvent* ievent) { Track ele_trk = ele->getTrack(); Track pos_trk = pos->getTrack(); - //Apply Track Bias Corrections - for (const auto& pair : trackBiasCorrections_){ - ele_trk.applyCorrection(pair.first, pair.second); - pos_trk.applyCorrection(pair.first, pair.second); - } - - /* - //Beam Position Corrections - ele_trk.applyCorrection("z0", beamPosCorrections_.at(1)); - pos_trk.applyCorrection("z0", beamPosCorrections_.at(1)); - //Track Time Corrections - ele_trk.applyCorrection("track_time",eleTrackTimeBias_); - pos_trk.applyCorrection("track_time", posTrackTimeBias_); - */ if (biasingTool_) { //Correct the wrong Bfield first @@ -1159,19 +1144,6 @@ bool NewVertexAnaProcessor::process(IEvent* ievent) { Track ele_trk = ele->getTrack(); Track pos_trk = pos->getTrack(); //Get the shared info - TODO change and improve - // - //Apply Track Bias Corrections - for (const auto& pair : trackBiasCorrections_){ - ele_trk.applyCorrection(pair.first, pair.second); - pos_trk.applyCorrection(pair.first, pair.second); - } - /* - //Track Time Corrections - ele_trk.applyCorrection("track_time",eleTrackTimeBias_); - pos_trk.applyCorrection("track_time", posTrackTimeBias_); - ele_trk.applyCorrection("z0", beamPosCorrections_.at(1)); - pos_trk.applyCorrection("z0", beamPosCorrections_.at(1)); - */ // Track Momentum bias diff --git a/utils/include/TrackSmearingTool.h b/utils/include/TrackSmearingTool.h index 9940938a8..c0a223af2 100644 --- a/utils/include/TrackSmearingTool.h +++ b/utils/include/TrackSmearingTool.h @@ -29,7 +29,6 @@ class TrackSmearingTool { const std::string& tracks = 
"KalmanFullTracks"); double smearTrackP(const Track& trk); - double updateWithSmearP(Track& trk, Particle* part); double updateWithSmearP(Track& trk); void updateVertexWithSmearP(Vertex* vtx, double ele_smear_factor, double pos_smear_factor); diff --git a/utils/src/TrackSmearingTool.cxx b/utils/src/TrackSmearingTool.cxx index 5cf0b5055..ec08922e5 100644 --- a/utils/src/TrackSmearingTool.cxx +++ b/utils/src/TrackSmearingTool.cxx @@ -75,20 +75,6 @@ double TrackSmearingTool::smearTrackP(const Track& track) { } -double TrackSmearingTool::updateWithSmearP(Track& trk, Particle* part) { - double smeared_magnitude = smearTrackP(trk); - // updated momentum by scaling each coordinate by smeared/unsmeared - // this takes the direction of the unsmeared momentum and applies - // the smeared magnitude - std::vector momentum = trk.getMomentum(); - double unsmeared_magnitude = trk.getP(); - for (double& coordinate : momentum) - coordinate *= (smeared_magnitude/unsmeared_magnitude); - trk.setMomentum(momentum); - part->setTrack(&trk); - return (smeared_magnitude/unsmeared_magnitude); -} - double TrackSmearingTool::updateWithSmearP(Track& trk) { double smeared_magnitude = smearTrackP(trk); // updated momentum by scaling each coordinate by smeared/unsmeared From 4c6b5f321f607ebc2f7d328c8b83252b1b622cd4 Mon Sep 17 00:00:00 2001 From: Alic Shen Spellman Date: Thu, 20 Jun 2024 17:04:56 -0700 Subject: [PATCH 12/27] typo in path --- processors/config/anaSimps_2016_cfg.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/processors/config/anaSimps_2016_cfg.py b/processors/config/anaSimps_2016_cfg.py index c8a55823d..291f0519a 100644 --- a/processors/config/anaSimps_2016_cfg.py +++ b/processors/config/anaSimps_2016_cfg.py @@ -62,7 +62,7 @@ elif not options.isData and options.year == 2016: print('Running MC') vtxana.parameters["trackBiasCfg"] = os.environ['HPSTR_BASE']+'/analysis/data/track_bias_corrections_tritrig_2016.json' - vtxana.parameters["pSmearingFile"] = os.environ['HPSTR_BASE']+'utils/data/fee_smearing/smearingFile_2016_all_20240620.root' + vtxana.parameters["pSmearingFile"] = os.environ['HPSTR_BASE']+'/utils/data/fee_smearing/smearingFile_2016_all_20240620.root' vtxana.parameters["pSmearingSeed"] = options.pSmearingSeed vtxana.parameters["v0ProjectionFitsCfg"] = os.environ['HPSTR_BASE']+'/analysis/data/v0_projection_2016_mc_config.json' From d1a23b070432fc5db4579609401886fde458ce9b Mon Sep 17 00:00:00 2001 From: Alic Shen Spellman Date: Mon, 24 Jun 2024 11:09:07 -0700 Subject: [PATCH 13/27] newest momentum smearing using KF tracks, split tob bottom by nhits --- analysis/selections/trackHit/trackHitAna.json | 32 ++++++++++++++---- processors/config/anaFeeSmearing_cfg.py | 2 +- processors/config/anaSimps_2016_cfg.py | 4 +-- .../fee_smearing/fee_smearing_nhits_2016.py | 10 +++--- .../smearingFile_2016_all_20240620.root | Bin 4556 -> 4556 bytes .../data/smearingFile_2016_all_12112023.root | Bin 5413 -> 0 bytes 6 files changed, 34 insertions(+), 14 deletions(-) delete mode 100644 utils/data/smearingFile_2016_all_12112023.root diff --git a/analysis/selections/trackHit/trackHitAna.json b/analysis/selections/trackHit/trackHitAna.json index 9f8ba19cb..83cd1b868 100644 --- a/analysis/selections/trackHit/trackHitAna.json +++ b/analysis/selections/trackHit/trackHitAna.json @@ -5,18 +5,38 @@ "info" : "N Hits >= 10" }, "chi2ndf_lt" : { - "cut" : 10.0, + "cut" : 6.0, "id" : 1, - "info" : "#chi^{2}/ndf <= 10.0" + "info" : "#chi^{2}/ndf <= 6.0" }, "p_gt" : { - "cut" : 0.4, + "cut" : 1.0, "id" : 2, - 
"info" : "p > 0.4" + "info" : "p > 1.0" }, "p_lt" : { - "cut" : 6.0, + "cut" : 4.0, "id" : 3, - "info" : "p < 6.0" + "info" : "p < 4.0" + }, + "trk_ecal_lt" : { + "cut" : 50.0, + "id" : 4, + "info" : "trk ecal x < 50 mm" + }, + "trk_ecal_gt" : { + "cut" : -100.0, + "id" : 5, + "info" : "trk ecal x > -100 mm" + }, + "trk_time_gt" : { + "cut" : -20.0, + "id" : 6, + "info" : "trk t > -20 ns" + }, + "trk_time_lt" : { + "cut" : 20.0, + "id" : 7, + "info" : "trk t < 20 ns" } } diff --git a/processors/config/anaFeeSmearing_cfg.py b/processors/config/anaFeeSmearing_cfg.py index e045112f4..2151d52c8 100644 --- a/processors/config/anaFeeSmearing_cfg.py +++ b/processors/config/anaFeeSmearing_cfg.py @@ -34,7 +34,7 @@ anaTrks.parameters["isData"] = options.isData #SmearingClosureTest -anaTrks.parameters["pSmearingFile"] = os.environ['HPSTR_BASE']+"/utils/data/smearingFile_2016_all_12112023.root" +anaTrks.parameters["pSmearingFile"] = os.environ['HPSTR_BASE']+"/utils/data/smearingFile_2016_all_20240620.root" RegionPath = os.environ['HPSTR_BASE']+"/analysis/selections/feeSmearing/" anaTrks.parameters["regionDefinitions"] = [] diff --git a/processors/config/anaSimps_2016_cfg.py b/processors/config/anaSimps_2016_cfg.py index 291f0519a..956371794 100644 --- a/processors/config/anaSimps_2016_cfg.py +++ b/processors/config/anaSimps_2016_cfg.py @@ -47,8 +47,8 @@ vtxana.parameters["analysis"] = "vertex" vtxana.parameters["vtxSelectionjson"] = os.environ['HPSTR_BASE']+"/analysis/selections/simps/vertexSelection_2016_simp_preselection.json" #vtxana.parameters["vtxSelectionjson"] = os.environ['HPSTR_BASE']+"/analysis/selections/simps/vertexSelection_2016_simp_nocuts.json" -#vtxana.parameters["histoCfg"] = os.environ['HPSTR_BASE']+"/analysis/plotconfigs/tracking/simps/vtxAnalysis_2016_simp_reach_light.json" -vtxana.parameters["histoCfg"] = os.environ['HPSTR_BASE']+"/analysis/plotconfigs/tracking/simps/vtxAnalysis_2016_simp_reach.json" +vtxana.parameters["histoCfg"] = os.environ['HPSTR_BASE']+"/analysis/plotconfigs/tracking/simps/vtxAnalysis_2016_simp_reach_light.json" +#vtxana.parameters["histoCfg"] = os.environ['HPSTR_BASE']+"/analysis/plotconfigs/tracking/simps/vtxAnalysis_2016_simp_reach.json" vtxana.parameters["mcHistoCfg"] = os.environ['HPSTR_BASE']+'/analysis/plotconfigs/mc/basicMC.json' ##### vtxana.parameters["beamE"] = base.beamE[str(options.year)] diff --git a/utils/data/fee_smearing/fee_smearing_nhits_2016.py b/utils/data/fee_smearing/fee_smearing_nhits_2016.py index 09057f2a9..846368986 100644 --- a/utils/data/fee_smearing/fee_smearing_nhits_2016.py +++ b/utils/data/fee_smearing/fee_smearing_nhits_2016.py @@ -23,14 +23,14 @@ def gaus_fit(histo, xmin, xmax, smean, swidth, snorm, nsigma=2.0, isData=False): #iterate over randomly fluctuated fit parameters. 
Keep the best resulting fit niters = 100 for n in range(niters): - norm = params[0]*np.random.uniform(80,120)*0.01 - mu = params[1]*np.random.uniform(80,120)*0.01 - sigma = params[2]*np.random.uniform(80,120)*0.01 + norm = params[0]#*np.random.uniform(80,120)*0.01 + mu = params[1]#*np.random.uniform(80,120)*0.01 + sigma = params[2]#*np.random.uniform(80,120)*0.01 #Data has shoulders, so we can specify the xmin and xmax to do an asymmetric fit window + xminx = mu - nsigma*sigma + xmaxx = mu + nsigma*sigma if isData: - xminx = mu - nsigma*sigma - xmaxx = mu + nsigma*sigma if xminx < xmin: xminx = xmin if xmaxx > xmax: diff --git a/utils/data/fee_smearing/smearingFile_2016_all_20240620.root b/utils/data/fee_smearing/smearingFile_2016_all_20240620.root index 81fc9c57d728225c3a978c409cbf50b258853a02..f0ec08b674261b5c2de3a1ee7148adae07c21c85 100644 GIT binary patch delta 483 zcmX@3d`5YKzChB=d%q0$1>a9m;x&|5v+w;xM>SpshGh&a3=9m3rK@glo>&yG4x~U# z1_nlXpl}utrvNb!u=p}CB21Y0LzWq+Y_kMoBcq~k?S|uKk#D-z-rFW8AkVPOKlMIP z|3Z+#Ow}OMCiAn#P8MgDt?!PNktC@s zXIL_wcyLj?-uhut{MW^IJe?=aT${+2Q8P>S?Amtm^`&xzH^PkBP~PcE}CALY+Do3I6M8yfd{eGds!w({BUBeTl_E~f?wtz z|B3cW(Y#vcrzth{Im_-RTN-a=c=m8Z@hQ!JJ2!VOJ0^d2&ZqMC-~KNCwt2s$*VjMX q3A`zvz#$Hbid*0ii)L{H1@L4E##la#z{~)LeHAMo4_HmNpd$cA8qv!D delta 483 zcmX@3d`5YKzQFz|*30z01mAD8;5C$3v+w;xM>SpshGh&a3=9m3r7NCXm{=6A4x~U# z1_nlXpl}utrvNb!u=p}CB21Y0LzWq+Y_kMoBctNojd!n_J)F5NcSgHV2S3BY(o_zh z{)HfenW{miP3C8foh;5QThCYak@?1i*)9d80RnxSx2VlnGRY-(?yfSS{S6|2l#cE^ zSI2sHmj&me-P&17DIZzy{VjgK4@4~ zA?wc@&#@X9T=~jXx?rXEGY#q3t9n(E;@1uz7u~Vbjh83w)@#*1ZD;}?5kM$c))791swq({nLQ} diff --git a/utils/data/smearingFile_2016_all_12112023.root b/utils/data/smearingFile_2016_all_12112023.root deleted file mode 100644 index 63031e483d29e4c0f7c3a5ad03c73d657f6df8aa..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 5413 zcmc(j2T)Vnx`1~e1dz}n0Yr+@dy@_)0i=T{MS2oiD4`}4rAJT^kSbk3MMRp2(p#j1 zC?G0GQ4r}x5IrdM2F^X_ymw~ad*{A4^KNE#_MWx>wbz>O`)BX@{{UZK0sw4v0|4Lx z0PKPQz_v-gRv;gsqcfQN=L!IjApn5V0id^R4CPt6%gGxZ535TRq+jsGg@$bL(xQ~ZvykV3BIwAe;punga3J=IG zBOi;BAIS$Jh@?a6UL0;0Bf?Tww9zH4d~7GGHdJI}Ap>=AJa^5{Q_%l#Gb ze@`40;EMZSOd*q>2Y~c{lFuQLR!Jm_F47E%WY$Tdi9wJCNF>bXJ6UPDSqoMlEa#VgX1t-u%;Z2TBx2&8h-ZkN(rwEaDzU5 zm|_|5qIwPel@}r4GaWV6Zb*a?6N>J|T7o47Mz531!jU3u=oviP37SQFAM?qKE6dRj zpoTz@5G(hD=ajq#vNB}m^c-$MN<~NoV0h4$lgvEzn0dxAbKfK8l#PEd7yc`Aa#9E3 zQ9k+}1U$<9&-DJgab&t&ARyy+x}C>#+mGlnb^K2E?)qQoa-*0~R@?;(DK;0m3sU}o zY+9~b`13cwYEj34HU1;uat0{00cwx9$kBHeTFxHFp4|mSVAvrr2&6j_Z6LYQHQIpt z>8#-0%Ck((2{=)@KI4dTQ!4=s`22v)4S;$TX2tT1$weqNb~>3d?tLVXQXfqjL{~;* zdt+Yvo+KF3O-Icz$)J^SobIv1y#WYC@dV%CMJ}(Lm0Gc;ea1B6a%0V!Vq8FZ<7F>? 
zV;(k3B4RH28#q|ZJ_p2h$TYhuKnH)n=~*)2n5{-9zzBqnJN_UWi8(fse{3Z9$Ox73 zA4bIgrjeuc|5swkX22jI|93N8$7VW?%p|w}VP@^`nK@4Xo3@1-zu9T}%})4#VaM|S zYKIB@ckOWTDvd@*BbiFLQ!A%U&s}6ph`ixnyLRY=D=U)3dDp{f|##|v zf&lsT{nmEMm-`a?FKWMz$E~R<`==^;_dWP_#y_XzS%t*1D-)_8v#0Jn zd6<32-_6ariI;#f6X)N2d%|K^xRaN3@^&&4ScH*plgOk`BM>r86&>xvTi{Jf!9GS8lJVvhL7`fcH!>QgNv=35Vk zqt9N?M5{jE<0@b0B2BN&%ZRSWYB4^wemd&e3W)8QAf9FaaE8K&U{* z3J*&qN3YNk?^i2_oAxDYgz8gv?IG+)0COysO9wujbGou+=^N9fw5vtg9bX%%JKC-W z6r1GZs&=Cg;(;En0G>yqh-WUk$5{o@s#qoU63=?W{4$wmz90e8$%|mICr?` zc9iiRH9baXACcAYOBeCP7YPmLb4J)Hkz!Qj&w=x@HGXu`QFp$%iK{)2;G9O2B-0j8 ze4n;u;o>n|5WMFUw=k)}`sT%loUitWs*Bqk#U}odn@@RNn{sU(Ki7nf>BA14ZT9ln z!13o1`fZM#)7vsx@Pn6$u-2QPIvKPLET3k7lVD1m6L~qIWJ;BPU1J$?&=OKQ5z$rK zpI;&7(Uy~=89Q8eJ2LcqMAjNE^|bx2$r=8+Os)QKQ3-vI=m+!a^LGk8s-Bq+JskDD zZF%d{P-$l=wCsUcKKr@%HA-Jl2P=<1_J#;`=H1eYy(<;C>riZH5WSl!aj92?moMA$R3d^gf&K-yBBgiX zS}zuL^BQeXdhw?pR zq}8XjyNB3Kb}uGm5R*+@GC8(3bFji3%paZJsomPFwoA-pNnmeXu)`n9afV#6u~OaE>dYuC`(L~O8l zYv13f#rTlbc_2A$KrD>QaJ}w%QKf&3iAkR1SDJ6>XEorxB=dmDdHW`$31e;&(!?|- zlnByZa~HjN3vP`QU&Lt(AmvZT1wN$1dGAE(#QxwJaL>}&R$_6Tnh=AeI=vnmm8Ec; z9I}&}(H8?5kC|U_Bd&)|?x}RIlaj@*X%%Z{+6t>hftu@;WT#={?90z{-(aaZ%@-6cwhq{a}o6sV1p{VRPhypym>$nQ-FlB?43OuV)#Xh_t*uoD}D(l<=dY z$W)_}Kk2Vib_uJ{VmEQGrLsmvs6*f@6@3$Dq=*^)a_}ol++h5za^4u%(If_w0dCFG z&+tI^fcYxbub(y>a)W609WV@AWv9;G8PrJ=>F=Vz8*IcL#Jf!AbmCO7wSzWQR#WaS zNdNn#K^C-AbJh$s4{t1Pmz>OS@;D)#t#cbfFCRvowMBtHXrsp?(NbZjvqqmWosmPB zSDz~AKjBZxu5DN8iCg_~SDSTqcGfvsojb$ZrdG?+rdq3BajYo6g`)~g?=bvK`q6cu zPM276(j{w5>Yny_x0ztMX6_G+LW==cK63hvfk17ir~88XPPMD$jcC@ko=VKK3!Y2m zAvTPVv-9<(xvD}l4N3C>?khsg9t&Yx*{kwwTnNDttu)@>y zfewYZnZ-B>yfEFUl$NpAxu&0(}`2`TcPiFNG^y^0Z zqW#X~+5QNco_2Dh&t!Mn@iV#YYH3YU_SAFQO=HC*^te94NRR6Tnd7HT zNe}KLjf}d84q|JZ#%2PGA|~QoUlDFF7IP2Uy6i_Wlr*AhI%bh0Ewf<# z#{NpHI$;|J2I%s5-BW&90~FFJH6!eFlP6&&q0N{^(?slm`6}~mw#W&t+mo@N^1VZ) z3$*67@zSMFXCp*^WL22i=4_sS9EW?Hv1a5ly>vOnTk8ABLY}i;c*xYsjNY zM{VW=m}g}hoTsh2EIu_AKgl!?Mv!Jq-uR!XXtL_nx;g8g2u)igGNr{|=IFPbx`cA0 z_a^e|HEKwx2v%E5`b<*bJqcW%ugsEJB14=iP=JA*3cNI@61RSz$}2-fG3To!o`b?` z#QX6%%XiA}^K{ntL{+iR%QZ}Hc{9#k)AjF@Vb+RE4_L<(gh^yi!rv?Rl$l+vpTqYt zxyzX;Npvi2XS?ledeh(R7Pw-GD0n$u#UD0P`d&VcQkMBg5A(RfK5Yn2Uz*QOBA*-U zbAPNhIm%D@vDbBQq`L**IWj+4I~5S?UL!te#F^KRXMX-rDki%1%x482a#qQe(w4Oz}lh#EnMi zf=AXbJ2DUQ)hQspfgh47m#9l*6TT~4q^90)C_GW+9?4n?v-l}4)G8I2_{f1;qEdM> zd8BYm_~G-lC4UvBXC^VUtNX0ovxvoghm~j<8m(;GUZVj+D@S=7$LE}tjY?BA`8EWX zDYN2mmWo?RKa(DvIf2>A`X%mUBr*J+Pn}S`h*xjG=I|;PguB=%wP5#izufHklROJ6 zlA2%x#l+~Kj#S9(t?n1gdapi1g?3V09+;CX&beMN|DOLUU$Gvu)C%dWxnckoH9*KBf;HNCf>`MQ952Yx0 zdj;zqDeK5(!gT>kDCMf&C}*Eat9b;gX76f)2sEE`!1Ax4QtwW&d?-GBQhy`sYEv$5 zQV7OU+}xTk~2TVrr`k| zj7NVOORzIv@%LQ#GQ3j;I%_m?t_ur3&=d;Eq>_%K!lMj#j9B6Z*9|EHe(pUl4YzBM z`7zv9eaEDbVxKxW?Bgi81pzxM$3UycrND99iQGCmE(QK`+lf4O6!-6I8j%E6AlIyI}Cybl@MiPXs~$ From caf5a0e4855cadf100693dafbf97d2544121437e Mon Sep 17 00:00:00 2001 From: Alic Shen Spellman Date: Mon, 15 Jul 2024 16:47:15 -0700 Subject: [PATCH 14/27] organizing simp ana code --- plotUtils/simps/expected_signal_output.root | Bin 0 -> 21618 bytes plotUtils/simps/simp_signal_2016.py | 270 ++++++++++++++++++++ plotUtils/simps/simp_theory_equations.py | 145 +++++++++++ 3 files changed, 415 insertions(+) create mode 100644 plotUtils/simps/expected_signal_output.root create mode 100644 plotUtils/simps/simp_signal_2016.py create mode 100644 plotUtils/simps/simp_theory_equations.py diff --git a/plotUtils/simps/expected_signal_output.root b/plotUtils/simps/expected_signal_output.root new file mode 100644 index 0000000000000000000000000000000000000000..c99bf34ec32440e01f087184bfe73d46a64a02e4 GIT 
binary patch literal 21618
[binary ROOT file contents omitted]

diff --git a/plotUtils/simps/simp_signal_2016.py b/plotUtils/simps/simp_signal_2016.py
new file mode 100644
+ result[denominator > 0] = numerator[denominator > 0] / denominator[denominator > 0]
+ return result
+
+ def load_pre_readout_signal_z_distribution(self, filepath):
+ with uproot.open(filepath) as sig_f:
+ return sig_f['mcAna/mcAna_mc625Z_h'].to_hist()
+
+ def load_signal(self, filepath, pre_readout_filepath, mass_vd, selection, cut_expression=None, branches=None):
+ with uproot.open(filepath) as sig_f:
+ if branches:
+ events = sig_f[f'{selection}/{selection}_tree'].arrays(
+ expressions=branches,
+ cut=cut_expression
+ )
+ else:
+ events = sig_f[f'{selection}/{selection}_tree'].arrays(
+ expressions=[
+ 'vd_true_vtx_z', 'unc_vtx_z', 'unc_vtx_ele_track_z0',
+ 'unc_vtx_pos_track_z0', 'unc_vtx_mass', 'unc_vtx_deltaZ',
+ 'unc_vtx_proj_sig', 'vd_true_vtx_energy', 'unc_vtx_psum'
+ ],
+ cut=cut_expression
+ )
+ events['vd_true_gamma'] = events.vd_true_vtx_energy * 1000 / mass_vd
+ events['unc_vtx_min_z0'] = np.minimum(abs(events.unc_vtx_ele_track_z0), abs(events.unc_vtx_pos_track_z0))
+
+ not_rebinned_pre_readout_z_h = self.load_pre_readout_signal_z_distribution(pre_readout_filepath)
+
+ def sample_pre_readout_probability(z):
+ if z < not_rebinned_pre_readout_z_h.axes[0].edges[0]:
+ return 0.
+ if z > not_rebinned_pre_readout_z_h.axes[0].edges[-1]:
+ return 0.
+ index = not_rebinned_pre_readout_z_h.axes.index(z) + return not_rebinned_pre_readout_z_h.axes.widths[0][index] / not_rebinned_pre_readout_z_h[index].value + + events['event_weight_by_uniform_z'] = [ + sample_pre_readout_probability(z) + for z in events.vd_true_vtx_z + ] + return events + + def _load_trident_differential_production_lut(self, background_file, selection, signal_mass_range, mass_window_width): + dNdm_by_mass_vd = {} + with uproot.open(background_file) as bkgd_f: + bkgd_CR = bkgd_f[f'{selection}/{selection}_tree'].arrays( + cut=f'( (unc_vtx_psum > {self.cr_psum_low}) & (unc_vtx_psum < {self.cr_psum_high}) )', + expressions=['unc_vtx_mass', 'unc_vtx_z'], + ) + for mass_vd in signal_mass_range: + window_half_width = mass_window_width * self.mass_resolution(mass_vd) / 2 + dNdm_by_mass_vd[mass_vd] = ak.sum( + (bkgd_CR.unc_vtx_mass * 1000 > self.mass_ratio_ap_to_vd * (mass_vd - window_half_width)) & + (bkgd_CR.unc_vtx_mass * 1000 < self.mass_ratio_ap_to_vd * (mass_vd + window_half_width)) + ) / (2 * window_half_width * self.mass_ratio_ap_to_vd) + return dNdm_by_mass_vd + + def trident_differential_production(self, mass_vd): + if int(mass_vd) in self.trident_differential_production.keys(): + return self.trident_differential_production[mass_vd] + raise ValueError(f'The dark vector mass {mass_vd} is not found in the trident differential production look-up table.') + + #Use the reconstructed data in the high psum region to scale the differential radiative trident production rate + #This scales the A' production rate, therefore the expected signal + def set_diff_prod_lut(self,infile, preselection, signal_mass_range): + #Initialize the lookup table to calculate the expected signal scale factor + self.trident_differential_production = self._load_trident_differential_production_lut(infile, preselection, signal_mass_range, self.nsigma) + + def total_signal_production_per_epsilon2(self, signal_mass): + mass_ap = self.mass_ratio_ap_to_vd*signal_mass + return ( + (3. * (137. / 2.) 
* np.pi) + * mass_ap * self.radiative_fraction(mass_ap) + * self.trident_differential_production[(int((mass_ap / self.mass_ratio_ap_to_vd)))] + / self.radiative_acceptance(mass_ap) + ) + + def get_exp_sig_eps2(self, signal_mass, signal_array, eps2): + + #Define simp masses + mass_ap = self.mass_ratio_ap_to_vd*signal_mass + mass_pid = mass_ap / self.mass_ratio_ap_to_pid + fpid = mass_pid / self.mpifpi + + rho_gctau = signal_array.vd_true_gamma * simpeqs.getCtau(mass_ap, mass_pid, signal_mass, np.sqrt(eps2), self.alpha_dark, fpid, self.mass_lepton, True) + phi_gctau = signal_array.vd_true_gamma * simpeqs.getCtau(mass_ap, mass_pid, signal_mass, np.sqrt(eps2), self.alpha_dark, fpid, self.mass_lepton, False) + + rho_decay_weight = (np.exp((self.target_pos - signal_array.vd_true_vtx_z) / rho_gctau) / rho_gctau) + phi_decay_weight = (np.exp((self.target_pos - signal_array.vd_true_vtx_z) / phi_gctau) / phi_gctau) + + combined_decay_weight = ( + rho_decay_weight * simpeqs.br_Vrho_pi(mass_ap, mass_pid, signal_mass, self.alpha_dark, fpid) + + phi_decay_weight * simpeqs.br_Vphi_pi(mass_ap, mass_pid, signal_mass, self.alpha_dark, fpid) + ) + # the weight for a single event is the chance of that decay (z and gamma from either Vd) + # multiplied by probability the event was from that z-bin in the original sample + signal_array['reweighted_accxEff'] = combined_decay_weight*signal_array.event_weight_by_uniform_z + + return signal_array + + def tight_selection(self, array, signal_mass): + p0 = 1.07620094e+00 + p1 = -7.44533811e-03 + p2 = 1.58745903e-05 + mass_low = signal_mass - self.nsigma*self.mass_resolution(signal_mass) + mass_high = signal_mass + self.nsigma*self.mass_resolution(signal_mass) + print(f'Signal Mass Window: {mass_low} - {mass_high} MeV') + sel = ( + ( array.unc_vtx_min_z0 > (p0 + p1*array.unc_vtx_mass*1000 + (p2*np.square(array.unc_vtx_mass*1000.))) ) & + ( array.unc_vtx_mass*1000. >= {mass_low}) & (array.unc_vtx_mass*1000. 
<= {mass_high}) & + (array.unc_vtx_proj_sig < 2) & (array.unc_vtx_z > -4.3) & (array.unc_vtx_psum > 1.0) & (array.unc_vtx_psum < 1.9) + ) + return sel + + +if __name__ == '__main__': + parser = argparse.ArgumentParser(description='Process some inputs.') + parser.add_argument('--outfilename', type=str, default='expected_signal_output.root') + parser.add_argument('--mpifpi', type=float, default=4*np.pi) + parser.add_argument('--signal_sf', type=float, default=1.0) + parser.add_argument('--nsigma', type=float, default=2.0) + args = parser.parse_args() + + mpifpi = args.mpifpi + nsigma = args.nsigma + signal_sf = args.signal_sf + outfilename = args.outfilename + + + #Create MC signal analysis tuple processor + print('Initialize signal processor') + processor = SignalProcessor(outfilename='expected_signal_output.root', mpifpi=mpifpi, nsigma=nsigma) + + #Set the differential radiative trident rate lookup table used to scale expected signal + print('Load lookup table') + cr_data = '/sdf/group/hps/user-data/alspellm/2016/data/hadd_BLPass4c_1959files.root' + preselection = "vtxana_Tight_nocuts" + signal_mass_range = [x for x in range(30,130,1)] + processor.set_diff_prod_lut(cr_data, preselection, signal_mass_range) + + + #Initialize the range of epsilon2 + masses = [x for x in range(30,124,2)] + ap_masses = [round(x*processor.mass_ratio_ap_to_vd,1) for x in masses] + eps2_range = np.logspace(-4.0,-8.0,num=100) + logeps2_range = np.log10(eps2_range) + min_eps = min(np.log10(eps2_range)) + max_eps = max(np.log10(eps2_range)) + num_bins = len(eps2_range) + + #Define all histograms + expected_signal_vd_h = ( + hist.Hist.new + .Reg(len(masses), np.min(masses), np.max(masses), label='Vd Invariant Mass [MeV]') + .Reg(num_bins, min_eps, max_eps,label=f'$log_{10}(\epsilon^2)$') + .Double() + ) + expected_signal_ap_h = ( + hist.Hist.new + .Reg(len(ap_masses), np.min(ap_masses), np.max(ap_masses), label='A\' Invariant Mass [MeV]') + .Reg(num_bins, min_eps, max_eps,label=f'$log_{10}(\epsilon^2)$') + .Double() + ) + + + for signal_mass in masses: + + #Load MC Signal + indir = '/sdf/group/hps/user-data/alspellm/2016/simp_mc/pass4b/beam/smeared' + signal_pre_readout_path = lambda mass: f'/sdf/group/hps/user-data/alspellm/2016/simp_mc/pass4b/nobeam/mass_{mass}_simp_2pt3_slic_hadd_ana.root' + signal_path = lambda mass: f'{indir}/mass_{mass}_hadd-simp-beam_ana_smeared_corr.root' + signal_selection = 'vtxana_radMatchTight_2016_simp_SR_analysis' + + #Get the total signal yield as a function of eps2 + total_yield_per_epsilon2 = processor.total_signal_production_per_epsilon2(signal_mass) + print('Total Yield Per eps2: ', total_yield_per_epsilon2) + + print('Load Signal ', signal_path(signal_mass)) + signal = processor.load_signal(signal_path(signal_mass), signal_pre_readout_path(signal_mass), signal_mass, signal_selection) + + #Get Tight selection + tight_sel = processor.tight_selection(signal, signal_mass) + + + for l, eps2 in enumerate(eps2_range): + signal = processor.get_exp_sig_eps2(signal_mass, signal, eps2) + total_yield = signal_sf*ak.sum(signal['reweighted_accxEff'][tight_sel])*total_yield_per_epsilon2*eps2 + #print('Total Yield: ', total_yield) + expected_signal_vd_h.fill(signal_mass, logeps2_range[l], weight=total_yield) + expected_signal_ap_h.fill(signal_mass*processor.mass_ratio_ap_to_vd, logeps2_range[l], weight=total_yield) + +outfile = uproot.recreate(outfilename) +outfile['expected_signal_vd_h'] = expected_signal_vd_h +outfile['expected_signal_ap_h'] = expected_signal_ap_h + + + + + + + + + + + + + + + 
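The comment in get_exp_sig_eps2 above ("the weight for a single event is the chance of that decay ... multiplied by probability the event was from that z-bin in the original sample") is the heart of the expected-signal estimate, so a small standalone sketch may help. The numbers below (target position, proper decay lengths, branching ratios, boosts) are hypothetical stand-ins for what the processor actually takes from the event arrays, SimpEquations.getCtau, and the br_Vrho_pi / br_Vphi_pi functions; only the weighting formula itself is copied from the code above.

import numpy as np

# Hypothetical stand-in values (in the processor these come from the signal arrays
# and from SimpEquations.getCtau / br_Vrho_pi / br_Vphi_pi):
target_pos = -4.3                                    # assumed target z position [mm]
vd_true_vtx_z = np.array([-2.0, 5.0, 20.0, 60.0])    # generated decay z [mm]
vd_true_gamma = np.array([25.0, 30.0, 28.0, 22.0])   # true boost of the dark vector
event_weight_by_uniform_z = np.full(4, 0.05)         # probability of the generated z-bin
ctau_rho, ctau_phi = 8.0, 40.0                       # assumed proper decay lengths [mm]
br_rho, br_phi = 0.45, 0.55                          # assumed branching ratios

def decay_weight(z, gamma, ctau):
    # Exponential decay probability density at z for a particle produced at the target
    # with lab-frame decay length gamma*ctau (same form as in get_exp_sig_eps2).
    gctau = gamma * ctau
    return np.exp((target_pos - z) / gctau) / gctau

combined_decay_weight = (br_rho * decay_weight(vd_true_vtx_z, vd_true_gamma, ctau_rho)
                         + br_phi * decay_weight(vd_true_vtx_z, vd_true_gamma, ctau_phi))

# Per-event weight: decay probability at the generated z times the probability of
# having drawn that z from the flat generation distribution.
reweighted_accxEff = combined_decay_weight * event_weight_by_uniform_z
print(reweighted_accxEff)

Summing this weight over events that pass the tight selection and scaling by total_signal_production_per_epsilon2(signal_mass) * eps2 (and the overall signal_sf) reproduces the yield that the __main__ loop fills into the expected-signal histograms.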
diff --git a/plotUtils/simps/simp_theory_equations.py b/plotUtils/simps/simp_theory_equations.py new file mode 100644 index 000000000..85057ece6 --- /dev/null +++ b/plotUtils/simps/simp_theory_equations.py @@ -0,0 +1,145 @@ +import math +import ROOT as r +class SimpEquations: + + def __init__(self, year = 2016, alpha_dark = 0.01, mass_ratio_Ap_to_Vd = 1.66, mass_ratio_Ap_to_Pid = 3.0, + ratio_mPi_to_fPi = 12.566, lepton_mass = 0.511): + self.year = year + self.alpha_dark = alpha_dark + self.mass_ratio_Ap_to_Vd = mass_ratio_Ap_to_Vd + self.mass_ratio_Ap_to_Pid = mass_ratio_Ap_to_Pid + self.ratio_mPi_to_fPi = ratio_mPi_to_fPi + self.lepton_mass = lepton_mass + + @staticmethod + def rate_Ap_ee(m_Ap, eps): + ml = 0.511 + r = ml/m_Ap + coeff1 = ((1.0/137.0)*eps**2)/3.0 + coeff2 = (1.0 - 4.0*(r**2))**(0.5) + coeff3 = (1.0 + 2.0*(r**2))*m_Ap + return coeff1*coeff2*coeff3 + + @staticmethod + def rate_2pi(m_Ap, m_pi, m_V, alpha_dark): + coeff = (2.0 * alpha_dark / 3.0) * m_Ap + pow1 = math.pow((1 - (4 * m_pi * m_pi / (m_Ap * m_Ap))), 3 / 2.0) + pow2 = math.pow(((m_V * m_V) / ((m_Ap * m_Ap) - (m_V * m_V))), 2) + return coeff * pow1 * pow2 + + @staticmethod + def rate_Vrho_pi(m_Ap, m_pi, m_V, alpha_dark, f_pi): + x = m_pi / m_Ap + y = m_V / m_Ap + Tv = 3.0/4.0 + coeff = alpha_dark * Tv / (192.0 * math.pow(math.pi, 4)) + return coeff * math.pow((m_Ap / m_pi), 2) * math.pow(m_V / m_pi, 2) * math.pow((m_pi / f_pi), 4) * m_Ap * math.pow(SimpEquations.Beta(x, y), 3 / 2.0) + + def rate_Vphi_pi(m_Ap, m_pi, m_V, alpha_dark, f_pi): + x = m_pi / m_Ap + y = m_V / m_Ap + Tv = 3.0/2.0 + coeff = alpha_dark * Tv / (192.0 * math.pow(math.pi, 4)) + return coeff * math.pow((m_Ap / m_pi), 2) * math.pow(m_V / m_pi, 2) * math.pow((m_pi / f_pi), 4) * m_Ap * math.pow(SimpEquations.Beta(x, y), 3 / 2.0) + + def rate_Vcharged_pi(m_Ap, m_pi, m_V, alpha_dark, f_pi): + x = m_pi / m_Ap + y = m_V / m_Ap + Tv = 18.0 - ((3.0/2.0)+(3.0/4.0)) + coeff = alpha_dark * Tv / (192.0 * math.pow(math.pi, 4)) + return coeff * math.pow((m_Ap / m_pi), 2) * math.pow(m_V / m_pi, 2) * math.pow((m_pi / f_pi), 4) * m_Ap * math.pow(SimpEquations.Beta(x, y), 3 / 2.0) + + @staticmethod + def br_2pi(m_Ap, m_pi, m_V, alpha_dark, f_pi): + total_rate = SimpEquations.rate_Vrho_pi(m_Ap, m_pi, m_V, alpha_dark, f_pi) +SimpEquations.rate_Vphi_pi(m_Ap, m_pi, m_V, alpha_dark, f_pi) + SimpEquations.rate_Vcharged_pi(m_Ap, m_pi, m_V, alpha_dark, f_pi) + SimpEquations.rate_2pi(m_Ap, m_pi, m_V, alpha_dark) + if m_Ap > 2.0*m_V: + total_rate = total_rate + SimpEquations.rate_2V(m_Ap, m_V, alpha_dark) + return SimpEquations.rate_2pi(m_Ap, m_pi, m_V, alpha_dark)/total_rate + + @staticmethod + def br_Vrho_pi(m_Ap, m_pi, m_V, alpha_dark, f_pi): + total_rate = SimpEquations.rate_Vrho_pi(m_Ap, m_pi, m_V, alpha_dark, f_pi) +SimpEquations.rate_Vphi_pi(m_Ap, m_pi, m_V, alpha_dark, f_pi) + SimpEquations.rate_Vcharged_pi(m_Ap, m_pi, m_V, alpha_dark, f_pi) + SimpEquations.rate_2pi(m_Ap, m_pi, m_V, alpha_dark) + if m_Ap > 2.0*m_V: + total_rate = total_rate + SimpEquations.rate_2V(m_Ap, m_V, alpha_dark) + return SimpEquations.rate_Vrho_pi(m_Ap, m_pi, m_V, alpha_dark, f_pi) / total_rate + + @staticmethod + def br_Vphi_pi(m_Ap, m_pi, m_V, alpha_dark, f_pi): + total_rate = SimpEquations.rate_Vrho_pi(m_Ap, m_pi, m_V, alpha_dark, f_pi) +SimpEquations.rate_Vphi_pi(m_Ap, m_pi, m_V, alpha_dark, f_pi) + SimpEquations.rate_Vcharged_pi(m_Ap, m_pi, m_V, alpha_dark, f_pi) + SimpEquations.rate_2pi(m_Ap, m_pi, m_V, alpha_dark) + if m_Ap > 2.0*m_V: + total_rate = total_rate + 
SimpEquations.rate_2V(m_Ap, m_V, alpha_dark) + return SimpEquations.rate_Vphi_pi(m_Ap, m_pi, m_V, alpha_dark, f_pi) / total_rate + + @staticmethod + def br_Vcharged_pi(m_Ap, m_pi, m_V, alpha_dark, f_pi): + total_rate = SimpEquations.rate_Vrho_pi(m_Ap, m_pi, m_V, alpha_dark, f_pi) +SimpEquations.rate_Vphi_pi(m_Ap, m_pi, m_V, alpha_dark, f_pi) + SimpEquations.rate_Vcharged_pi(m_Ap, m_pi, m_V, alpha_dark, f_pi) + SimpEquations.rate_2pi(m_Ap, m_pi, m_V, alpha_dark) + if m_Ap > 2.0*m_V: + total_rate = total_rate + SimpEquations.rate_2V(m_Ap, m_V, alpha_dark) + return SimpEquations.rate_Vcharged_pi(m_Ap, m_pi, m_V, alpha_dark, f_pi) / total_rate + + @staticmethod + def br_2V(m_Ap, m_pi, m_V, alpha_dark, f_pi): + total_rate = SimpEquations.rate_Vrho_pi(m_Ap, m_pi, m_V, alpha_dark, f_pi) +SimpEquations.rate_Vphi_pi(m_Ap, m_pi, m_V, alpha_dark, f_pi) + SimpEquations.rate_Vcharged_pi(m_Ap, m_pi, m_V, alpha_dark, f_pi) + SimpEquations.rate_2pi(m_Ap, m_pi, m_V, alpha_dark) + if m_Ap > 2.0*m_V: + total_rate = total_rate + SimpEquations.rate_2V(m_Ap, m_V, alpha_dark) + if 2 * m_V >= m_Ap: + return 0.0 + return SimpEquations.rate_2V(m_Ap, m_V, alpha_dark) / total_rate + + @staticmethod + def Tv(rho, phi): + if rho: + return 3.0 / 4.0 + elif phi: + return 3.0 / 2.0 + else: + return 18.0 + + @staticmethod + def Beta(x, y): + return (1 + math.pow(y, 2) - math.pow(x, 2) - 2 * y) * (1 + math.pow(y, 2) - math.pow(x, 2) + 2 * y) + + @staticmethod + def rate_2V(m_Ap, m_V, alpha_dark): + r = m_V / m_Ap + return alpha_dark / 6.0 * m_Ap * SimpEquations.f(r) + + @staticmethod + def f(r): + # Define your function f(r) here + # Example: return some_expression + pass + + @staticmethod + def rate_2l(m_Ap, m_pi, m_V, eps, alpha_dark, f_pi, m_l, rho): + alpha = 1.0 / 137.0 + coeff = (16 * math.pi * alpha_dark * alpha * eps**2 * f_pi**2) / (3 * m_V**2) + term1 = (m_V**2 / (m_Ap**2 - m_V**2))**2 + term2 = (1 - (4 * m_l**2 / m_V**2))**0.5 + term3 = 1 + (2 * m_l**2 / m_V**2) + constant = 1 if not rho else 2 + return coeff * term1 * term2 * term3 * m_V * constant + + @staticmethod + def getCtau(m_Ap, m_pi, m_V, eps, alpha_dark, f_pi, m_l, rho): + #c = 3.00e10 # cm/s Date: Tue, 16 Jul 2024 11:04:05 -0700 Subject: [PATCH 15/27] update --- plotUtils/simps/expected_signal_output.root | Bin 21618 -> 86910 bytes plotUtils/simps/run_signal_search.py | 366 ++++++++++++++++++++ plotUtils/simps/simp_signal_2016.py | 92 ++++- 3 files changed, 444 insertions(+), 14 deletions(-) create mode 100644 plotUtils/simps/run_signal_search.py diff --git a/plotUtils/simps/expected_signal_output.root b/plotUtils/simps/expected_signal_output.root index c99bf34ec32440e01f087184bfe73d46a64a02e4..ee9130e6969caaef49b2b17b9f394c25ffc764fa 100644 GIT binary patch delta 74356 zcmZ5{V{oQT6K*!i#mLx@Nkj`syDu zQ(e>3J-6lGK7W4~a&>ZY2Lrq70|Nsy2m7WL_#YVmlU4o$_3gLCF8c6zT6A2no&qGn8NSKYB{kcUp}9cLIz9>_3}> z-YH4n@ltlaXZ`=$)PSx$j8_6!Q9hXWw~H<-b#;YKMp;Q#uwt_4NLvT`L z42sVx?MeC*MR%NqK7{3I*Y7oh1LAEfT=_=%e%r*iMixGkQuvYB-E%){z-8!LPgB$7TjaAM- z>96z>1LWJ%>@NEoPGt<$(&Ns37|MkJwgYodWkqG0nVFX0TT7dzmv|BW#Ow@#%|xU_ zP8ZTA31*s7j;v_5XE7ORu@2uTjOYpt&2cmUC?x6CdQ)>|Jm7p#2jmxo<2@ZNFc{Iz z69`OUJ65CL4qWgVNO=;ks@}n`#PLts;U{ZwhA4rvKSHM@PT3kNmuJQT_y| zs_sdmFFZ+U<%e*6>f@*}BGw=qw%fQ*qb(oDCyf<($<#-$X z_I5$)GT5zd;;RuRd0MS_e8Y%bsZIyJ0-aHwJnOOF%J4+O{ochae{vkSpLUIQS{9BS zKV3J-ej|eXjNMYU468_bsU%c!{Lh7K_~CCC5*^D;bb!GBy*b{;Y50nmNnKx8TPl`k zjBMT%!-n%-ZUy_@Whwi~o#GgFB4{8hYWec2mPbbDmzpnxy4somQ9E)pKH~rubopPA z`RnMEwN7kTTz+IXhZEBC&wNS8a2n#$N4J`AK2k9cuh+_c9%+p$*eNVt 
[binary ROOT file contents omitted]

|PcgyuELK@a2Ftti#e~XXC?R<1JJM4!l9%{qlgh|96HD{PseCd3X zc@!p0GGsaaJD^`AEq?B#8g$;0awWZcPxJ?LTVjqzKwB=ql3t7k8e`YhatvQUT|KPA z;(Qp?4t$ywOxz7sezsp?;yqB63fukh3QWw(#M;p+(gMg?AF|fleiUkj^3>|w zpP>`ic(ScE1IDDAWxJ1s!ID)pp->#eLC0a4%>Zr{4vsaC%X-t9+pWn zvS{IH*<*Eos^bB0b~$(PPw@y`?z|*R(bk5GN7=1HoA+=jQHc=GYKDv6G1Ix)3viBm zvtLSH1kU&ACi)Z^@U(K0>0w|qp6=P1LH+tM9F@OM&R*filY_cmUi&=oxc^M-TyZ2G zX%Yeg<1fR$+ti`HK@fJn7M02+4zLxHHux(`hX)^jWQV=z+hMIQGzznuupCz3KEw4E z7QHze4ndRx)+Mpv!F7hZL3|ieP<`+H| zL2FNM`cT|sXnKGBB-p$SnxX!#7G`2KG3e!IQW?|^ab7T#g- zn(h*R*C`yepL`lTDUQpuj-?8}svuOhUKbL-1gZGc->X*UP-^Qsm*d(4ttXUZRbLcg z&=W*)*!n3f2(jUVt0{O8_A9wn$PW)44DMeI2*i`@34^>-i+EaG_a&hA4_tm&I+#qT z!u14$-Ul59xY1oSS(&^Icgl3Dz;~zMF3#wGbItt%+@JqlXMau(ced}|56=$5jVJn) z^9}-BhdEtoe-*>^q$wX2g)g4T#87#;-GvJ*B~*z#Iy-I(duyJ+(^796^8Lr*l&7xZ zC_aTJ{Rc!|j`YDnDu~>`B?FHtnj)FT9AUo^-k{ZX6?S(w=q^#Q!RFx@yXd~hcp#jA zdy&sil{l^rU*Pqff$7ICZ&|OmFqxJM(0j`Wx%L`*kvxJe(Z`{i z)hlhNAq!pltdZ0O1?V1{BkLUTgVx-^;pCZmsDyku=25V+#k;gPrT@12nk`J{G~cA7 zmx+@%u6}pAw2Z4KEtE^*lX0t-!%#B0|i>YTy|>U3Mvd#?Y6* zlR3Oz*Nhp@dF#&Wc*(-!TDspG_Cb2oXLuu&!?IY=Mn3kX0A(cq%?J6lxu+liNBo4HDNq#zMaplb_({2)?-b( z{PDnNFW2}mah^|X%UD6ij*BZ!OF?!T4Cl_Yc25z-XIF zh^mx53~w19I#tsRLpkNIcGhp9ms9X@zWzEiympit)x6o_{oeZ=3!6}(EzB^etRtwg z4;;0FU)jaKyh4ODtNQD6pQctg_pB`{(F*%d7~HiTMisE&w{7$t5V}LS$KZ-kF$?SHr&U>HSLbO z!p)27VC~2bxSAi~Dv5w?)IQ%vW2yEB4>Jpi>mKg_&kfCP-B` z;ld#${Vyc~5O7k~|KoHL(pM$gDwhrYa^=ja82CL(*r}Iu60ba1 ztbVuDo}2E1~LSX9ZGhF@J+tgnbEeA9SV_a2dg&;23!NAwhb@TOUMEb~PKFP)T6&&2MB z=Lq$WoZ8)Z{z7oy{>ce=oGUxfp<4jA1bQu&wA#w zIi5tsZzz5*!^7HqI%@U9uwTzfoB31>d!|)wM_mGJsI_fx315V@TdUsdMR`~i2A&bF zx&y1Te*L?DYiwb~tybXm_W&&Je(HMvTMPQN(cBkfJ+^pPt=otgS)|{>ERD;epRN|+ zl5V>O8xBC2HlihB)Cu>B(wB`l&Oy^QW4}edKaBUTa#8CO{deuMXLs6z@n}a|$|A2E zoVOoozHwa-Zo%<8FFjz!i|n?DPgnKedq6(9VUi4gubz#&Es`oDpiOAooW*$rF~lBL z;J$!h>X}`0mmLs7Rq#b&{uV-R)AA&Bb1pVn>02s{;QdwDJ!0aOj|Pq6yH|2j{5t7#%$S(NID)G@)&C2Ni$bP~RM zTEew|HuB(YYwZ@bV1bt%yF;xM`{DV!vu5^tD)BjYeu7HY5bk$m%ow(_z-@=+p16oc zI9HQhHl3e=h{X%nDlz5`C%y zKiJgEu6;RZ3!AnbQ!=|pVg2HsZP!i-nArb+Id-6waf^4&PGjq*C$zTk5!nwlq=tUD zqI4>x<4ifkl&*A)p6r0){7sSI%R8Y<{$)@_$p#h@rMK?!F~aWN4Y{IDb2!NjO1zow zfvcnRAI)KI+^KzugjtuAS6_f6kofo`NL=(UHySaw#IDllBK&Hv_^Hc4R(( zcUwfb!+weRCzW`emdd3o9E}JPo=A>&QHZ#Go}}-_FTAd^<#>K+0pXDv)!pxx5Pq&; zQQc}k!UT%<8A~i9lv};P`NdZR_m$&s7y&^O<FKd1G;KU91o@K9pTbFNyH?8wiGKMI;s4SkA%6SLR8=4Y#uI+@| z?KoM+DQUPoXDC_OwhZUxi*{^Rv*0Y{9^tmY4@VwFYl<(naG>FrOG7^p3cXv<={S zqPDa;(-?gDOuk*~_=|u$D{SLU#PR+_a{lJp6vAXF5>@Z@BO<~4QptKUqHH?c^8HBh zrt3z0#n~7{$JBLs7{(&Hz5B6$&QE(p|NU#)Ir|*Z85gt_P88tHnn%-S(kR|o>eMQp z?nTtSjpm5e7DUp28Y`MLz-w~{8-LH+2p0>MvLShZu%pgjZaiZ_NMo<^`0O_XYb=PK zsQHZ`o-9hGb501PK71oc>^%I5`9$Xq8@w{=CYqSMH?Py$MKERMs4wK=$@_dp8LrbK7BxO?_gL22-7o@ z<@gR6mF)>x=33C&JyQCA={+0FOcdDf`W}H@uq4G{y$n1Zk+=TtM+f(J!iU37n!`H* zOn%8z@UPc

XR7Lat~pY%B4{>+K&2XU`}g%F;+Tua5>XS8mRGX1qbHn0!!fobHi+k8Ri2t$LA-HE{SuWD;%R8~f8RQXI4-w;=1Dsy#10o+7;4c) zEVult^k<(DV^!)>`S%2(?dWbuUA%`kUPoZ&l7%St5357OI6C6R@z9%D{Rr>*6O&u6 zgfL5melKDjB9wyN@uOo9f(^fyHF8)Ya7^YKlc6vIcz=>fE~LQ!2#0ZbwFrEI(^+|K}q;PwCxGg@h8Ls<7M%sw>`I&$tJze%1T+->C z`HSq~lGBrL=Da@9k2Xlry-ds>Xq2zUofzKY-Rzm3|DkiPTlhTB85{ZfMch&<0hhz?t-FFPxR((S2xm;bBIiuImr#tOj8U&o@WsC zG-`Fl;SC}hHOP(}tU~x_Lt91W+X$nk5^tt|ijZyhz83G=hM@cp+M$Cx;4k&6{C(IF z`1S=ZD<#grr)Oy5%xzM57tJ0H?IZeUGg{didqVMleEYYh14_E^puVJZguM&yaxpAE z$&GMVkJ9b5 z>*L;kb>gPJh^}MU|1hu|aULahWFL+pVZGag{*5Y<2MFSOql=JY9JKJZP6F@3-e-!2 zRwK1jV!q`>4$@Bix+uLn0cn<8Q{I(@NFBZDvJ$Zy@7{)Nw;M{uyNhmILE~#kxf&>T zLn;$*S+&3AR)`@f`-m3*sZU7Ex}Q|!HjIRSjJkq@XYL~YSxc>x{x-ykHENVUID%M_ zIK|M9ClSN{Z27^*Ttx5FF{NoOMdaGx>*rr(@j75xzbj%J!Zw8)&4VNnG;6ru^FbN{ zc2hN+I-mo8t5D;_SB-d8*WSHgZ~{Jme4_4-orhP+16HyGQM`1Mn&q4#&YukHtFq01 zj(Cxq;>#P<4v(?6!u}B(IBV0~SPanF;@zggtA%3tzvFA}FI^%%q_vQ=Y3PcJQHA;% zhhkUHA(&f4JzNz!ghvl~erwFn!PPZ!O=4dJyjA2#nm(l?AlD{tR|z}9s-8`AaNI!j z-D>rNqAiGL7kO=%a2`o^W^|fPRY);^JeW8u@fc|X0_AtRcOj#A&^7u<12X;jj+s7{ zN7fzHYt!Ck$Z`yBv(nZ@);kJLh1Du#-Ftp=zXJ_2L$95?rhg5YcdgFIrJg~CRqyM8 zf(uALyqoXdq!dyOW`$B6De>;oyY@{tQ@q`H;r08;UL*&$h{TF8BI)22VJBjLo;P8v z{$xx-5#kdszSDXogg8NW%c#$Gi1{%zUuidh=%+mHn+1>XX8%tvdy`ItU-?uX65@v7 z#@!-&3jz_m%h!gVvjjmPgb6uo!T;GFs~?k^cy+of@T*@f{LGp>_NMB?_cQggk`Q0` z4s*Q`ue=B^=O^0yWZG~o4R;HFaoV$`hlfAAk1cZicmDO7^gBi}T75_tKAAFN=!5pP zJ02r{`(ZV`_t8toD{$&6BPAX3#q)Qe&gF-V@hU<0({;HFgk(Gnf0R3fsO6_E*FMc6 zF05-IGGsTBue9wC?O8_Z1?S@AXgXvRJ|At2$U@fK(K=#56gfLZWNO)eVv#fMVi@)# z0J*g~_nd2jk-N#$RMf|gJg$J;_ILY`+c`K#r^Wq*A&}ko`@}+D zKeBfjPB%S>Ll)&yf?B>PG9^li>W9uFW8EqS`8V0x!;iRpfa90@;2h$_p&uHrXV(i>Q`i1D&EMap0>Za9j~tm z9qlX~N4O&6=#eu(n1;IUc!xZK4;;;`m6JxGgP(E{M;!uQmyj(i`XGQL^;rF83j#>n zcMj~Khu_9z#{0X(c(!o+b5A#xEj>6+U!M^h7u&*k+jHy9rALr|lNvcwuHy}THZIF@ z_t&s3(foPd=pvpSlOsryiNHHqcGl}w4uZG~Zif7PhzQ%E)%kDwhz;{>`yp}-NnUb8 zJH3t~ZQs0Z@|S1GoRbqHso92{9k-MAb38(x8@ox-!4l+uuFE)NG041Y)RRFM^>E>?NFADP~MjU%L;$WTi0=25(e zG@k~(&zv-P=fiG$SWymd6Y1?4=ogR_KEk0{Z~FM<|o2v_YE`{U7wP_s0Sec3?>(Qsz+dO(~v3GEkM>060=>!pGr zq91*|_0g_tlkhfE_x9Fh*y7zqry?@#mgW`~+M&Gfu1YVI;*RT<5x>9jC)F;)#{+mQ zf9SUe(aw2)kotQI;1tmw=MPNpJ%=!d`J;W^-H2fhTJU}M6p0EOCzs`{kSgjDLA$tw zEIXl#`t!`lm5H%rE$Sxne%eWrnAa%kaB%Un48eOdv-dprd{Gj&C-QuPD@w=WS98Oe zP9C_~xM(O!Lfqt&&TK<}$=(M-p_-d`zq{n(o5&;-P4YcT zw$n$Uq4tpH>I4c@6sAY@cOn0FFY7CU0`eGKa`$%yBd0l|sQqynvP;|B2(RgpmBJc8 zedH%HZT%J$MBgDj?x78}Z7)(exJw>6_9Nxl=5Fh{A4rtf-EntK8*w@-Bb{vD5L=0J`SS5N$W8+;bwOEyz$0{jSN z_rP<(Xhq4+mPikN$)M)H+|RwVuj_Md1$aZJ7dknqi{?n_3f7$Ht3l>E1x*yoALOnk z^f}N(qEJU=Qmi0=$h+y`j=K{mTYuI6O{@eJ)`~r>5%j1$^Ta(w`x7c>B5ppFDM8hK zxtI$VUZ6_QZP;!h7nKuc`9pm2s64EAQ(7So6|D-|uecgf!A0_2uXPw@fALx4#dpdllaR-J+@|P< zE^?9y6e6!{kS#{##VT8k%$7diWu6Dfh+etqAx4Lk*gvn_WG^9!RchFxLxwosnh(vE zjv>K$x_JJ!F=F$azC~VVL5!@RbK;+WdPMu_czZhCMD%vf`>Dce~Qe5^Q(Cbf03)T-|3S39EwhVCvNDl z+(hY3=09Jb^AdS)cWmaMLsdXcjOOxLR2NZ5W=JNZCh(=o19BSF+FzOz4lG7(n(0l3 z+r<5lpOS$7OVpZXJ~EcbMvc?S9z}u%oiK263f8-NcQD7Yv zB(YI}ypJy)+ZWFucUy6-JF%XZ-CI?1N-Y=}?<*$us1@U#NB~kDE>1Q^x(y!hk9`-eF+{+! 
z@Z@IhCZeV$yIoS2k+|1=b4Vf!X^|{m@2i0vxsI%P`E(T8Bq{ds5>UqZDN9s`QQ4!mOn@1QdDI4-Xmx_ z7uj)&A{dSFS*>%0%xG}4_cOCjMgv_2we+@m)N{lVZU}auj-5?hm(Bt;gGEk>AxBWX zuut}SlLD%#3Pe>u{i0!uzEi4YL8y zQM7LNeTnZn(GI14sPTzMzQt)XTenBZ9%cI}_mdHs8q|J^ujG*NM)1Nf9&)6coVd_( zCK@T`y~|u?-bl86`1ctlJCalf{7X~Qkr>YRZs$)fB)$^gbztp1;*UraWL0iMC!Xsa`B1`B-)*jwa4Xq z1{x6SbJ;EGJPA@IDgl(Q@S&Qm>G&%fG^h1)A7wd(CfkP2y1X?seEt2$yNIaY zk17OvwE)y*_xmc}*F&wzV8}rJGHUiS$N8q8KsAY<`ijXgDo1IqJY?U8iumMDKHBRj znc;vo!BfoT1Pe+|Go2tNMi-H~NrcLqoEY{TQ`A&p!yQ=_mD@3M!2Br8csQLC5R`~9Jxq68Q7EE^yEG}BX+2XW+XiYWGbRR^X^z2_kiaXwSXs8zSN1-z0Mxli1GHSDV6ox&x(a2K}TTz*b z53-D^b}p^>w0K*QcC!$z3lv}YHxkg6(vFbvnYq=zoG6;E61%`5#Zs&jZQ?0jAo}H~nDHVX?Bn!X08COt$z{D!SLt~75;S0AOJNhBdQ1HUX;XTL> zGEZ)oUPIRTt2(oB8OYRmV-{3>9U19qH8mFdkrD1xEa(}IRQ6jixK&2X*Q7`(abMWt zJ) z!#mZ3$l0q%rT(%JMX&sdOIHF=k3dP{=GQ;`(Gazre^wnz z6H!Bzy60uq4OF#gdfxkNi1OEm4$}9f;JvE&S%)qQ6l)0YoaWF(k&>F`NvUpsP=&J+QDK)`K!%FY&jVS0b6={g}F({ub|% zf5!=nUq-jE<}H%*Yojc%yNY|qhQGt}r1P(1AMYYG%|r8|>Js9MRez*b?L_9b>v4?} z%E%v8drfj^2g(wYndKPMP`z<~tMu3>K{N`iXr3==#HS;>ua0~kL_33FOLl`9I>lqY zn(Y9(hcdI6ef`jTqsWz7Vh()*4owGJ_0TV=u63cY6#dQ%C+x}7(Vtyj-Ta;h{fCA^ zN@R7=`%__LLZAS>=0neHTCCCYgv+(ks}bF1-b=Fcr_jX|LtRtmk50CK*tp@DTy$^< zuIyQ_MLXSQndHrUw9X0(cf=pVrrMMiPn~Uh~sE-Oq@A zD5rXFqG#olcI+^|#7G|$OQ6S>UWT-f$K~Ehif5za!NK!3pehjQ$yDZzV3;kI`Upuu~(6=ws*u#GwJ#%R^Jy-XjyEb9v zT*)Z99z>_UI;w}*)QhfelSGM- zw-3YL=O~_0x)a|?jFVQbs(d21M^V4vwD=Pvh>3owWmX3P}6Z%m6(=!oTUknS}zH#uHgD z_MpNn`oa09L_4^-{Z^*z5!-RnGseDk{nN< z8B4~ntdIEQ(IkwBf8VBc7MjM$-yHI6`vi;@pC2$5pTlT>RlndLevJM)A(B|Xfzg-D zAE>|hU?g+w>l+;lj7Z;+OfxRSa17V|+Myfx@?O_D_~jQ2evI4sKAjey4a1LLVqwAn z72To2+n3O1<)B3sUXPv@g8>o$!|1j>dcw`~Cb}3!yKWNge{;vH8+uuvPNJP{%=^-H zRCj*DlcZhD}KlKji+ z{5w>gSomb??r+^uBWldQU~+w)P7N5=$2zs~$qu;5}7lx7~Oj{^jUF zeF;>jDRa8X+(eViY*~~JaeSSKXWC}nimteyK2`3+=#S;m0TbI>9C;)nFdoF#YdScGvC3^cj3=ux+Ra(eJVVrL#sT@~c77P{4SVluVToaK zt~VbG|6u6YgX@xe74i8@k9zS@74*NF{;DEy1idqLC1>qT&~r%7+~t%ry7W{;k2W@; zBUrC*f9;Mi+73rZ`w@W5pA)>d#X&) z)j0Le{6ydw`_rK>;f5lsO-PiM#4#?B9r6;X@gbMe$h8^Jv zsP}qbYHV+bPfz5f24mIH`P9$NK28jMpIaJ=Gotb3)BMeJ4<3wK1i8wmzQp+3`yMJZ z9hkbVn#sSQi0Qz;x{ER<`0A%DK^-=OnOCJ7^bg!IQ_xV}GZBrM0bdUqVq7kdQf6_2# zpNJ0}bqT6QVQ6o+=xL?#M6VNDU4(QVhDM{loT&=N=z6CoKf^dCHoIA!=BzOtljzXU zY>U}+E5XGXam?KedD5Qjgzv`Yw3BMw_!0WH=I8r;_)(#i_1C8tKPLX}4YmuxkHaNw zzeToTE_S*z>Hb~GrJ=vTBiT;m$ehtXJqxt#@_X^6Ai$}>U_)(wqPrBU? 
ztzDXjg?Y2k?Lg+knGl3Qe;nPr`cwd;rrZX{1=27Xw~KfG@-58p=(zLl?ZBMeK;D(d zd+^h~JW2OqE9Uim9K=6`V}X&1D(a>t7OLw??F_@P(9_bwEO!SBGs`igr7>8r62GNM zC5(BWgb7~a`nCDvDoKa>{@|za)9vjyzu||#+56+?sWB(|pgo#&e$9ukpku zCVsaCqW)SQu~9L)e~00NRQeCGJQ94^rzAYgO^h=Kbn{g`4TyQPPa99yvbK27=N91f zbC}%1jw``r19bZkSnAleR&x}IdVRU=ZBLOmwX`{*aSB!U2B&IwMBsz)IfA*$QFI=p z<}vBJg8pv5Z>9thj96w}xxM=eCRS4+w)hbnLoEO&`M3g!;S zQVsP%s_8~737*wHV*3V*+A|f5r*B}vqD_3_oKD!!~2m>M=~XO*A9B%_+KCp$UD)rJGyi0hU{LsWO` zrm$f|;n|HcgCqF-mnYKPcAn@rw;i!}okP!{*VUO-d35)XQM22oqa#_aEQg~3?X)qH zCqDf~+kvl_>|}h=YDCd3YV`=MH$_B8r{nRV%Ii?^f7AkM@|-Q_8+UH;UKHP_moG8C zg!$-%D=jo7_%>EoV=iTFtf3K}KHb-{`VXDc>pR-E_6It=BS0heh zoX2h0QOyv;qk`w$yG1ZaHmgN(-wp#sjRS58#5l*5;v*{?1-hHUzn$#NM(1qC^r_l4 zbTnyt&Hk}R`JAuy;Oc5cmW*DPa zp1xjgis_1=UiaD}{9v=i`xJdF?)iI|Wq=(kG_2}Zdg`$j!T#pq;RdYxiTEe6J;%oG z&OnpPE7&~LqTuYyj=!A3E$)4k_{-;)e;YK;fWM+8Guw5euxWne`@{x6{!CsMR(R`* zKgXZ$+0cx_@3ASC8zv3-bq*fx${tzr{ZLlPsZpRY!D+Q_f zMJ&9bS9@Nqg!w1ie(k2dgr9fPPTXbrfbYzeq_#@8Fq^-p=}ZI*zVf$uU6$X2e<{bC z!_PZ)F&4g1LPPcf!|mN=nq50Fyxa78;KM8oWnTS768-}H3YuMf1s3Q%l`->E%^Kb9 zkN?V8eI`AA3sbTLdWi+8J3qMXx9AMTDbali}#9IKA+w_FShWD;PA`O ziNC{%sg3a1MXYD$uum>HU;U1%e=Nrw+Q*0SiQBS0C4B|GrU&~;K(;hdbCa;k>15<$9O%fBE7Ff#k2m zF=mrH1QO5f>o1c0@b{J1Nh7M4*qB#peHodH^(CX`zgI)>+x`eu((4!aMas;c&K-hP zodoML8GbAmamKBxhG0pHIh*-W4Hni*Qo3entM;l|-e`55#M#dQRR3y(gD8We1?XFXcmoTKD5tsID1_RCK?$n+e;XJ>SKCLjgpytObhod@a3y1l_iEXzUD^_ATiCbd z6GM*-9};fZp9yXeLjFkc*@QzasFPfl?0y}J_OQ~5ET;;54tEZGe;IrP<03tJCPF(f zvu?~JeYzg=k2}UCR6b*QaWCzR?Ros}JD>bc*A1Jik$2t3lL({&%dQbzi3D;ngTqBH z{}3o7WE@Cyvg%T*U1JCU!+ayqC;HG+yIDs;4dhL(mV*=&QRZm_VC6F&Z zulT4DMIhG}TzZu!e?}m4nEu156g={q*q@4p;% zP5^O*vhumU0sIn9lr>s7iPg-gk)-P5SlO26ws^4~OX3%I43AG?A@*3cy#zIW{%oxw zP~E@}%eNW~UiUG>nfy~o`5h(=rDLQ`Rxu$fB=9Mi?=a(Nen|iZ6q^ z!V67_@d}mHdE}f2gGYiKWtbUlNx#{3d@{A`ChFZRl3X0uzDkZcj^OcMgF<`((?1_??Acj&H*ywwqzq>eYu`f3{)SnNhou zXxA1!^x{v^l3-zfnbff?6?{MVDk}0OC%zW{@+gmuz_g`<6-|a4rX(3S9s5%;HqDU3 zOPI&#e?|ZEW)EJ#t<2%iA1Gji@ zm~=Mvcr~|$gL5N`?lik1{bu917;6G5L<(oc@(u7Q#n5jwloI_)yONlWYGC|ogk>@@ z@B8ibu+R;}vuMb_{hs}muf2L>5j0CcV1kdl}@&wAL{L5sU_Xyi| zJW~GR^MOFq;ywJm@)cp{@!i=2?oSE3b{W`nsQxdat}~wN_j}uW@4ffloBP=kA2y|@4S|KR8Q$m@FMb)C=qzR$VNbG;e_!5Csyb+ft;Rz78 zl&9z8sR0rH7p>m08Xy=AU3fxb4)~cD#hmgj0q?9JUbB}a;H>h%Q3Mw{D$cN8{AZ62 zu3jUIu?t6gICCj@5+-Q7VqC$-z#47de|Q82zmCv)P3vSS-V$2TI?eW&QWGt>2cC_* z{TR(nvo$%Z7NXf_Wbqt_!DuokD~&Lo1C4)}{Hk}{acH#`EK(L^>kT3>8cnx=Dn3ZV2HhZ@EE^j4Q+ab7l(?-soyT1Jcqv7e^*AD z6zidm?v9IOBoXMJ>41U?GoY!XaTRH83bed|>rqgji8g~Y?~K}dqXTNmY2K7%z>}8w zI-QE~BAOs5s2L8V{(&++*Oh_7R7xQvL<*?yJ#rei3IUq16SHR9H-MIeotp760@{a( z^IAApfY#@lx4qgapmp4@E**{of0}NVNp|XXpl(&(<1Co~s_(symS5t46486ovF`zS z3VDiy@+OdG+1+zUaRL%?-IdnCG9day*5;b(4TJ*5W>V2ffFJS1oXu?n@U-aKzjvMm zoN>xO@5zbK;Xa!xj`tecH&j6_6L2Z@qHB6 zdIj}damHu;&Wc8#-tp4^e>8#Sy(+5Xjb@lGEl)BERf`$i?^UftC*t%)V2& zro9LF<~ITslE(qJWR2#q;XdGqUF;SSV?zg^&*sl?fOfaEH)EKvd*q_MVP(}WTEm^H z9Is+WE9*sxG}75LR)W}P?U?wxpnrpt3fhp*?MDc_YmPg#C6 zMO>>#sfLHf`2u?x6Mv$?f@=qAY(&Srw?BU}A;Btjj5AOl?z4~@tgmBH!M8_@TCQaa z-sYo21F^WDhvx;+B(CoZo0ciG+!cSBXlw&*eLf(Xaea-Bf3BRGX=XG80_AlD!Gae+ zs_7;=(Jl#8xn(yZHN1c}L1=$KnjPr<1tvD*RDtp3())0mY+y?IP(q$d49uE3EgsLR zfO(LYa5PH{nDbwqIed`?%n_w3Ogx*wR3g9gF5Ckc?bnSB`v`y`Blx%XlR%(<^rvI+ znm5p0XAYq2f3O5vs#tA?b^@Rw&>3$lEdeTATEi1^%s}3kV%Yf!yKmC@Tg)})fq0Cw ztU^%%h{%|uyAzUuK-lm^_LV-svsH-pd%*!XNe*R#yuZ=m!_wv_ZF*?O;I*qtQ9f30 zo@o6SdJ3(pg@4V$_VKJK+RRKikD+Db>)v}0Y0$!Je@3^)lLVM<(`-)Ok%eZ}{uMm$ zBSAC8_g*q2zd@5Xdh8?_=h45(Si6aP+Q+%;)Gdy;8;#lpalP(?HPjTM;cV`<*kwE(M9*=%3x8nB&Xe{0Of2ka!T zGniN|0ek5LNz{=yu-}Z2up1KucJ;=g)gw<})0|gP=#K}Mf;UgXSAGHWclGBE2WG$& 
zbViW+J`FGm3bPJvCIfveUwPxp>p*9oS>V_ve+RSzPai5XECO}?`P{MVQa~xw@c5!3 z9gvG|M+r__04e?_Pij7QAnM_3s$^sXg2o1`2xb7hs(|r@+F-y{{8Ow^w2Ss{YH)_$ zKaaNA(=uzg#H;FWR^t=DbKwhF0a-h;B?;qUDGyc=t~OTI@C=9pY6+3!-*+ zf3`EM(EI}{pPoopH2r7zI*Y_9H1f9n1NFavW8S;tO079d`2Eu9 z=ND0@!9UMXCLc8V+CiH7EjwDQmg)Jb;ELrJf9WR``T*z7$;hhmFF;s!@aL?Y5RgxY zT|76#1+=yT4p-)_fRRMFva8q}SV$`#e^kqQ09({y$>-+}fYX9|Z%Mr#xZ?Y2$J@Gq z`{MRJUc*)3Au}0i)f5EovZwPSb9%t-GK^0z8V_8mfhX7q;(_DJmMEp$60n~jpZxrR zA6OfMx|t@rfkmdabMSm2Fj+*uJ8W+OhRIJeM$-jA9}?}U_|X{X$YN1-|ykprgUg z+g1FrXm{0XB+ePjvy5{;?k|O)t&QdBwYMwS_whtZ;ENerwLGI3ZG0N7(DzC?X*@#9 zT(jeT?we>S@P0YY=NUBTc#bRde?6cv=V9hIcvZ)|_kO&rG&)y%j6aA8es93^)&^Vs zU+zIdsONVzF1V2Ie`6;fLNP15x532v8k9N;Kl#obft+Ge|U=nJHT*^VIR2Ve{wyU z83Ilh-8eR#ufYCzB%!fl2UxEzzm=DM2+U=}b~C3g0uu|A4jj4ygH!X5g6tHa%N>}D zSgQisv+)t#Bwv8KK_jPvf3g!Ortq(wDslxfIW`sk*6%oy@pR{(?$iOA)dmH@Xp zp!Z%tEjrkU${3nVLi^eHcU zrOfJRKQa!j7>awubZ?_YT=S)*qKjxk*{$Eliu#!M{=sL(tCn8Jf4EqT?G@XoENV8E z(T*O*M+2*egWUM-Xl7w*Cz4bEZJf1}*v{+$9EGwu-sk_YJbtqFj0M1)o$zi^Lenu`|;2pjkZTHLz_;Ww^XVrUxfV+svWo=6k z_*=dkO!yN7^?Qmve_CFFz-oBEM&Jt&$laU^$9oI>ZFBLKEOEdW&C*dFeHnP`+^QF^ z-2FTq;Cu$d5O zM#&2FF1=C7^yh)Lo;g4A(H)@SZIs?^3-PxfrgcS_~3giUpLP4*cm{ZitpWd&IO?L#FfI!ot0?K zMy;{r>?^d|C-nQR0|{C-S63&DNkmhW)_gH6|J7}SlxEU?{R+o%_*B@FBzNN_`uR`j z-#J!Y^zVCwe`A{LNwj!B?w=^_1++t4Br`)W33&Tk5vtt}fhH|s9gX7?r%$j zG`E0nk|?#NpcQyCY}MV)(F2d{zr%Mjy}-q_y*FGs0_;nS%n6{_BXcQG-Uc%3yNVAOrGYTB z#a7hOe;M%PdC1GS)v!7wmQ#3H4{&hmcn2piJ+kY}})`}1owJ$jEydF?;n{a>}xgJ2@h<2Y2b5kLBQksGzY zs^e$9Uxr4DPoLOJP(>>qEOBT+905&cG0e;f7n8zg+^cvAp!( z@5w>@66Y;)mqZY+A0gwuv=8FSqCs@jM<7#81suF7) zAT9~bz14x$>%(8V<|m#4o{zfKzd;+of4!Elzg07a4mgTERo?zZyP8VEvC|7^Tji?J zL`^T+%#}EIfhQ1c>{I^{>m|qZtZLee(iv#Bpq%mSzyEvz!;((fUOxZjt<`VpU;7HS zp{@$v%#K}zCN`GyPEKKU%k_IB=hYn`9U@aMjgbc0?e%yy`eb0ff6X#% zI(Y<~ed>6llsdrYs1PaC%K}1#4|H*Yra)vyWukmS5X4nqyVEDEgJfBo%=sb~kjg6E zEOF}rY2L!ZIU!Pz4wH7Zx#S@jl7fv>D%oSwB7cnwP$)D9m4cgCg3*Y&Z$ zQ8Hom;AS7Nd9QzHJnsW6giJ+q0`Gw_f2hz@>pIW}v=!Z1YzJCrjWROR5TFiFXk{O| z0;Em00%vz4f$%Bi>UZq_!{7GJ4_O@myf*w|;>rPZWLFbLG~I^wHTk4#f7;2=F3#bN zt!(W3Fzk-1qFX{+_cl8^UcX1{+A)6uBM8y_b=oX^mq*9H{}I`9KW8reS6|nqo|Ik= zwMBgv8}E1j>0x?G*tffKbZGm&&!tQ+7QkQOAEL4104mBL-8`)?z|c8Jn!M)(>|Dt7 zU?2r}6$+Jlejb9L@cn|zf0y$?R3}hi#&{Ye?%5KsGxmZsoz~44@2`T)bZ^AFb1Wcx zm94|v&lhCVAVCJxEo56w39In!K=xuiB{oP3GCQ1fboU>C4DU{nRF*49t|YLT<%ojB z)mxX;{N_L`nE#g*(O)VK3$pK0UVoqp)Pp`z@}cCy;Z0K%xUK~dT);aqjXsGQ*R=m8@pq5iXsZD^JC0T zyVwKyz_KlK%K?xFoh-|xmgEd*i_TVXqsGuZFt6tB>vD6mk5I}Vw*@QLp+=P8$odfA4Qd2U*L4ywJ&1QppP{s8@=@*UI{cqp2EWQQ&__pEL zAUE)eeK?(*e*wUKzcpNabPYIs9{0U?(FCmVv_B2{UIEjH!qrKx|F7qD@*|l?fX=jL zfd7dLP>xIp*t4Ahl935f=lN_P8h2Z{P<96hLuK^I>RSP~w>f~-QwCeM{KBGwCkh={ zoPh5{xoA(jF}mZY71}k5_4VSfMVqlxKW<4)prw#-f1~Zjpkv-g4<7mQdFdbH=6B!7 zM1Pc`;jCUFxwIQ-B`iUv$^j2>o*k5TsLEsdT--||D>7hU+`A$r3BaD`XE7H$2fQ<3 z^pUIrAoP-QfZ!=Fh`(JtmAOFxGR_W%YJcfL&iT_iHXH`>>-Q@=zOsX2d~O`Az-v%q zFrE|(e>n?E+G3Ntqh+9Uq41MY-4G~dHYe(lZh<1bXiRg;8ptoE`?ySwg50%MdsjUl zkhQUBO8zbdGF+!O?^q{+WHtMpbbC(_I|@4$-jM(zt+n$JgIgd}DwC+gCkg`To1_GV zn!uNk!SyQ81$YVHB<4$A1FkI1fuv`|z<#&Kf9^AuKe0rUERXq0sG7l4+k+1|H(KHfAoe4lx!?P~C$A+Cxt6crvQ-JelO8af}Hah%m ze4oO&8|}x;kTM*7MSK3VQav->XzSs`jb-NhSl@PnqrbuBnD?>r!|jGry<_}yelu~w`juNQk z+WlDg=msjStdr$tYM|1I?{`Xp6O?y9e^u14oCRh5+t^-(a!_3JQ&xG;2ns5rL5%&p zAdk0{`m16WWM2|*oEeM(83k6pwb#KQX@Og&P96Ya|3Yq>cbS36LxOe*nwub`sddoH z@D2EPqB;6vt^uF+Z{9@1c;Ip5qAs}E44mgq7(doX0XB}8-}9dt0kh!M{)D&lf54FY z(q~&AA86XC%AVzr0{P!CowQ|UAoaDKlcOdA;(uP5oQt;szp%&s+oJ=(P0_SSIVS)( z6y0QRhzig_&eP}dtjPVy!7a&7S$!w#!3Mec=5eHe?Z0@So(ne>p3H63uD* z{?7mkd7DOCMSYNcKs8RG)(6t>@@K!$&w|9X?e?pWRv>0(+hzGE1%#WQ 
z%`g3Z4T7gKJ1tn(f&a!(B;N=*@H%>F8zd6}H#z9j)64>Uyh~+^-x#ns2^s3+KLEy3 zuR_t85}p-=rP)pRqrftrUX*YhN`m~Y`}2(*X_j3Y2XrYf7oQs)dhj5QBU*x zcR=h+8vCdUKFD~#m>Fnk2KjkEl1lXrPkW zp71$nX^vUYybT5|716v|UwhD^Vr8EDLkSvPQxOtZYC*jXCu~)r9n_d;@xzp5KzXuu z*0ks&D4lg2=KN3we+sceS*pq!AlL5CmK>)9GRv##kMLiD)Sn+zDZc|j0;fdZ@)IwJ zY6Ufgo>{}{koX79T6-W6uCyU{MHTo?T@{(fa|E7~LDo4-)xa@2opHj?4p{G%JFUv~ z1Jf(J*p#q&Os~3ZXl8W{sMKQxnS3aK-0hRmFToKYeRsosfALlk5EA;>=q8x~UL8kE zo(k6II2ix>E>H;Tv$ISGKfi(wM)(CDVtZY8Jgkh~ioHN9$*GrRmRXMDFhQ)Cc_Sn2 z823K1Iv}~qi59uP+_j9ULr2=e725q-K*`>mm2S@n%)D<$9hMD%mzCzEs7?h4E4J2K zRFZ(?>ol^0f0OAT=XJB*)$kc8=`7HWe@O;4l1i{jqXvz(n0Ju^_du(c_TEkHYJlr6 zbkb+;0K~VZ5xzzMp_&B|&L03Sh5GN(a{)3EIocN|11$!Hd!dazpuzL#$)1WRs1`lG zyjPhG%CnS7E`Jw6ar60v$p{@NkS9^7y_f{qLu<#>e*{&Kv3TC@mb(U$k4ub;bgV%v zZu!FbYHJYA32}Py#u5bY?kiCE69BfRyh=^aA9%LUao*)#1umS;hTB@_fz87^JxxLq zm@n-3w8Ue&;iSlmDDnqD&GhIQrA`k}Fy~9(d^!hYni_OVx^h71`6%yuUoYUZmv%dJ z<753_f8m#s6}0Fml|e+{%6W7s+eEy=M~`;iT^%cX%8OP?aeKc__#g8=!4^{EbIalw z|B)jWHc7BUE2mBNsL7fEcZ}p51zjD`#Q*#|-A@8+O8<&ScUOS_q+AXCMm>lz@?LY! z6#|*3@0e|>JF(xV=nNWb1eLOW`ka6@(D-2{fA}DN9?(~Z!Sj=|0P}8q&n&4ydogzP zdvZDGkjHt3YxRKktl6O`TPSFMC{Zlw8wBVLe0lBSM*#kcYhAV_pv5eztHs?3>eqTU zAD(&)s@I$TRNCWV_s7GJU0A-LC`s~CRk$7G*52K92nYsQ;rEx9k8nY15nHs${RYI# ze^V{%7`8zqHHJ;quN8z&nJ(U4xq;<(3+>~xzkoN?rlav1rhko%afFTw0J|tV-MEl7 zFppak6u(vj`cq}e`HIs(UGO->9_z1Dgpt#R4pjr$3+vKGk4zxU$JPJxu-#GoxXfRo6_1Zch|>8CO~XdL{c z8m*B6wZ;nJDUJzH@m84k%O4;2m*PDaYy?`*5ek+raZA@KNQNau5-xJZSZ$2N}tSaoo{! zpkVtA_e0PCs8X|2i|p%w<`~&@N){hzYt?eysE+}i{-EI>x2QpP<@eQXy2qfm@PmYl z2?z8h@J3Dv>4RP}+g=dEe<zLCfI;BPgkdVko?Dysrzy!nr}nF~R|*ZyvU)^jYsl{LNaH4$X|*S4;UodC&8 zZLf3fzJTZgpOb?dCkWf|%;k9VfIu9J&A!kD;N$ausq7dC+&$Mbe=>6QfPI9#K1rVv zSj-xGSIQcJKA~Be^hzvH?-kw)Gu8l#m@hs4_Etdl^hMw1s}>*(rqA`D3lT3OFol(19n{!HK+iwB0FD$fbvU4&j;Y zN>&qBmC{N<-R|;0u*G`-Xcy%Q@c^Biv&MWf4WM`GgJKCy5$HR0vPODc0s{-3o=@dA zV4#pa^=A@5|JS~xMD9z_H<;v=Y)%0^oJ$l2IC-G6e&o*Be?tb^L5+zN-eiED()1AE zUjvP~`wwjDu#U;G`Tm_ z#7BXc(n!pg>ctx3>5sgUp%-p2xKCg>YAi%OHzm34yjsNywjk$>>x%$6-pQ zbl}gKb+q4~wQqQk0hAh5SB!fv0h@1xBuCO32;Asae^_lP2FZ}UJCJM+@=>FaF%6lZ zVrAR-Ep!Mp3w&RPC%b}nl<05Ly%NwP(|?&=$OQUD7qj9gbHL!6cCije5*WT2T%(R- z0z-!sHJYzQU@(%%IzcrA2C5q-e=e$lUaR+SJwFQ2O*f>9-FE>UDtW~eg*^ZbJ6qb? 
ztDu=uf6bZ0RtRcMy)oiLE}%kf>}hfP02Fm3H$#3ggWSP*T=_Z;$c$*Z5{QI?K#dnNR5Omm;03y5cvA!f6hA%c%laLNzu)K z!%jv;{~q(r(MZ3y6c$E%A+a5Qdv?$k?Z2+^f!k;)D)?6Z#N%V$MU|jJm&zR+Y^41pZqbsN1{LW&e>Zbz9e5=Nv zo2?+a$@)4bD2)K+vE=e+9v_m%^Y7}e*_O5 zHuwn{?RTKPydHPF_ztuy%*phOHWzLDwdtBj|A-d)vou$J{kJD^Bp|mvXM5&24hv4q zoFk)AfJc;5?v9fHbd>DIJhoWf$$nQxd;rsp!q{=r%K|_)IPj}Qwg)Ip-BT7b?*1Rs8Yr>uJ!*Fw?qGMGo-xL|SpJ z0%*~1cNy0`26e)5)xoQSpu)Blh|5(4io)E_i68~!>RtUL>|cS*qi_!{f5$D5cBbX!8$p#2CQ2j-aSRWz@(&; z_{K~dXdCC94NhMKitqNLM^QOIs^Q-FAm1H`ulzWWmVW|x>59>fT&sW+q5rn>YalwH zyw>^tsWI9mf3gr&Y=E|(f6>TVO>(34yIi+CoF~!TTwCSrIof01hfmJdW%L9eqv4x*Fd91|93+1UhCR36Xg|*e{oOk(%Y&~$&p*0^hJPD$UJq#WTWbM4DU@{L z@Bqyg9p`^06QGvw$C^E(4a%Q1zGeodfI{V1P>_NU$enNE3^8p38Me=SmwoL(0u}vu z-GSw;u_7!KX0;&re=pJ}=(;fQjk+4tKV1Z#TM0Oqn*D$y=|NSyK?AU~mTwi7!~-MC z@R^4d_CWKe;Z9fQS0L963UaGV2I9srmn7nFAj0WSUejj-+}oVhJzf9M;r0`S5p3>j zk9j*`$gT@*D{&*ExHYud{-fA@Vh63Rx&A0QZHs1F-S0$<;L(bBiUNdQY-nV6GOH^oHFWl z&ecs2_O@x(HyHteuAE-#GGk1iJZfd^*Z^)JH{OPxe_Oy#7$*CvIRu!U$eb_c(*S+5 zAJ;`ntdIS47;hHe4@hsftIzC&0TI_XpWnsvZ^ERGugiZNow9bF5^}W4UO>w31gi^b(qiv^#UH2ab9FbFw*>Yn%HRuM_S* zHoxQpf5a=F>iApufZ4xxjeJiF>mz@Tax$+5sq+arXD7))QTa`QfT|~`t61k&lClDL z1eM+y%?8~-#}o2@{lUOB;PozhH5iSUO1I&7WAq)V%NO;7N$X&`eNZ@R3}tp1p3IWxn|4Jd8<4j#K6 zXb6)u{_F7tRZ__pr2?4F!PMXFrS1Uo65S(Y-IXBIZFTZJ=Om{41>5JP^?_&{z4!R8 zF9y)WoomRCm0G2R%x$hUG` z3`pm^NEzQw0;_WD(D}W35Ui{*;K%Y*f9dT)**};MPf4y*bJW}cH1wr(W;6-_RyrF; zxf4NmtH17Ha5orq{@hX+hyY_Tt6%3$V!`C(yCMxHEFRzMgYF!PfJwEx)rN>G7-#-H zLxqgM$Rq8G4>JcC5)Y?c;dcVP(Hg4D+rL3q;y1Qe!3nfg?JB4;96`$@Du%XNe*)Cn zmKN8gq(J%GGLMuqb{>uWaNg9p405!j?WuS)ApJ7ktYzC2#Bbna>!h}TaA#+AgewCG zM5vN0y{yOjr{t@}B5J_3dC_F%nLM!lTw70TAOL0=jl#HATVNmvFD>(Z2{fP2MZ0Xi z1hVJz69uIDKzPN%{l)DTz;hxUe+lxj0Gy45%Bu~PX!oHVDN%wO+DeeM6uRSz)(vgB zL%%v;zCS(hsro9kJS?F`fg6J6vfF1*F(#wY=0%6`t7FH!cjvLZk(%5)###`bmRKf%Chy;VJ4?F(0hhQvd#}dZ!5lmPp#qe;igNbWP+{d*!Fp+r_tjs_FMq5eC zugEjOFq=z_VsID?#0Ova*p-6rey{r_ymz1jskk2^uzTTLWV~B|5@;mZ;9mT^2dWYm zng-2BK}kKrZ`1uJ$on3>f7PFg0GUQ{x-v=ZJJ1qZd%J%fMD?wo%^*S$9BEfZbos#d zBRopj6YF0;b)V{(Apj1sWMg)EMqn`y5^Ko042;C?bW*)(K*y(M{kr}uc2Yrii_1^_Q$+;OcvWm82k_7pq)^*N}Fge zpnQJrivI8vaGa3pC1Vc;;n5a5_osJ2R^{36Tzag2eL#c9n|sPK8bf<9=@qyaQ?pZ$I>;Pl2*t zx<3D&E-3hyE_@Zs0@VMw?tJ)*Si(OdXr{;Mj4{ij|HCIj7h@WB$G&6+d6~%V;->B*dhL4XsBQ@vPA@pj9)Af_JBg&=Q&N zxAOz?XwE!If1fU56wPFR%sInag~nUvwJXiS(ID`WxXJbYk9gFk>2!(p!+$#8p42ZN zmzH#(79FttJ~{=Q;XjmRbRUAq+mWex1`&{r_u}Z*z|Mo#v3u@ud7znKa+2rj6VNur z{nBX74Z1r5+f!|dU~nnym3)m481Yh^ul}q8MwttBf2VJ(f>F+5`$$a)7*U=7{gmP! 
z7^E}b(v9~AeU+m^TFgJ8`?i_QJW~U-16=~rd*T2+yEa)sCn=RQ}9PC_SQtRCq@eBcaUed5>!#6;4Upz!f?E+AEjlcPQuU6%V8T6rZvc=N#}v=n*P&M|}@ z%?7a*alYh5Q-(&FJ?~GWiC*h)D?D#BlAu@De^tngdMBQUUQPOT9ETSF2BTJK?_<1Q z7;#_Z&2^yBQhxO29}RHbd5Dv{`T#`v#0}Faa6q=qS6w#V1e6;*Z$xfnfu_-N=cQI% zfbH|_N_FF)>$Px{_+<|CUnVAXo%{rbyb7yS;!0rX(c$XLTDHgWOAP=Z1D(cLFQ ze`i3SEJISl<^<@b;(NHNPJ#~Ep+PPiF;)-ey=n-01{!bg^w3psf~xUMU#czECml?E zARed&a#Lr|jXXL6nTQM3FMgYW1oqWk!TOjYw#BEOHur-d!8pgg5FX%NGEM57{|;Pr ze!pZZH-Js@&SiI$3{3NJMoKK0?-@wmf6j(V0ZpXG6Cq1%9^=8ayl48ZK$7S_;B6)Z z_(KDizD81`BgYesQ!L(SM_8C@`7}0nr@C^7)JYhv-O{;U7$Jui3VSwa-> z-Agp7v|GK4&7F(|o+KezTSTLJc}jNN*xbClfs`(bHR}BHOfO1^+oc82XHg))zD@JfasZ%IUl9HKsG&s_g(WkD7$99_Gw@Pjf4P# zWm!xha@NOZ;5`pI?^(M-D6xCuvCWf5OPiqI$yCo(iVOO)fBQaj>Vp2eXH!N+37}V* zlX{Ck26Ryf7F1#KrmT% z{yUpK;1rkOm(@_9-3w=Tzx+r+8*Jt-!DT9FrQAzrW4aYBD@$K)?0t%+A9v{Wzg|X@ z8b7)SF`v??(FkSUkTn`6v8gwCT!8-K)RroF+M(Vbgm4wF9JR^usmc5Pr!Upj5_nhM zOgfGMQX#G0!{Pe!>0A$NE`-MKTz`C}EpVT;YWziM1)^eGL(TaYK=zbX_w?`~D1E&a zM<}KP>H!iDM&~;KDSKzdg>Hd%a^w)s?en0kL4w1QdLHzUdw|ca@1Q3jCK!z43%a$- zyJTqrpz}4OFh;r>v^9qW_Wh{^_5h zaE#$jWC6$&3mD$Txd9UQW7V5&qd}xLH1I$|4+M<{9;bYu2j1Niiv7-hz||`6T86Fw z8+F1H(k~gnBq!R`??4T7-^Y{jlT3leuFnRC_Yla9tIPX{ae??kB1vGzZ_Iz6c{L`j z9dK`{;e=R3p}lBLkt7#+w0}_uMRYdPXvJ0~wfYY!S|F27x4Qis&4vj-eX&4;#%3S2 zcyv~vQTabV4s65FP}rM&flHrI|MaJY6H`K{PjoWciVE9vb9iuvbHNQYPhOL`bpFFJ z?^Rly>$7_Q;iFdz)_8wdfmXcum0ox$a0@s-qQ+SRk(^gE=TR=mRDa@*e7SoQ6u%HQ z#osOm)x#f5(Z;7hQz0ek0sRh^*QhD*Y-0CCfyUFS`{tnIeS5KXyc2Z9ByHtOXhHkq zh20#$!U{F{RaAo~-Rr!S=mL`Hdn^x=GfPt-)Id!7(*ywuyT zyi7&=X|jwFQYX+RMA`GHB%zhNdD17{`p`T+i2U_EkEYBFyMLKEM$v>8L5&hY9Qv33 zg1@L3+e>sJlG-?V6ZKvYp7ys$MqROg90m?MP$$h72F_?t^t0q8Q}Z8QRBv=5{Nl~n z<2Zaiq}=N?eR7PCMmcsy@@j#We70d=u^G4`G-Ak$@Im-DwPQ%(6_D<7rMeKh3JPxN z87J;pgNkThv41^I6sUDo+va>41P!b0RfB>H!I#n+KDn`{deOy2k zZQ;}8*FmEOmEb%5}MgqKKa+*z1i@?d1A%7(G1P@rQ^zUAqAO(i159J_9 z4z#Y^0n}cGKf z5kgz!B`Kur9%vhwWeoo7 zixyR0wpQrq_%99>1>y`;xj<9E+-)C(3!Jyrdp?Q-=1ZcH$|ht5$y(*ou++OC*LT6< z_J`-7L>76EPVoR##8U#S=%qpJ!D8lIxhrUh|9{Z)A;s>G-XBkfC-*_UA$qt+ss_|D zF%QMj4^RoX;G2q@28zoc87}4Qfc)RyPK^O2kbO~jGSuxINYU4pl|^AbN@}Xwr~7Rn zbbmNe>+=-w?Mc_lvp)syFK>v1hVBBp$&tr&2sJS4{9-3t}* zGk?fKi~Ew;u>4%sOW zbUAZqRk?H&=h9^~C+}WpFguSXOp{(9fd^=Ws=Pcq6`QLP@i{|(pAq#(I7K{tQja>a zsy!~xiDG+cPMq70mqsm@l5|eBO+%O6fP;&!Bil#OVDj0VqqH85s6s1Qp9uG=%;aK_#!5kR_cAlzp{&JAXLH zF`wAVmOXagD;OI;eVf(>ayWd1b9`OBx2{j2(W~3FM~kc(#K~t z#K1dv(Y@sAKkVGm_Lrdl2`v3Qo-gdFfzfn!fi(3h&~at3{KfhUR8+tJ zZ8Kval>56_mS&92FSxxd+B-rAHGiF>xSR86r~k=tWVtBXkb<{{oZ)C?Mb`&^!wW3| zPwSFJ4w^R&3-g)rN8=Bzmva{Hp<(K2w<0T1)L+D&PN(6Cy7_#PDXv+fPL>K^OP(0i zl0|$y@ILnU&yNSWNjFhFI%hs`iV4-d+&X)$xD?e~dT8o$HvpBj2==Q7&V=?+q1p_?%PiDOnY`Iv3er=Gyb#?iPS_U@;K24Ni zdkN`g&tmU#HW)8l)BV?rzG~-Rr}?CYD*FUWbkZ5o=MbmFkV9(pG0W~vLf#h2((V+O zcCk6;J?(^W#|;j;V}HE=cwWQw8!1qnj^TRu2b&-4iD^}HIt{!=*6b9l3m_Qxsi9^K zAH+C=2>(jbfusw`-#3{aAT`ZqhqrtHGCoV^cUS?>8TUG+}eb z8HrMmq`STqe!&Jrzf`0zbYi|>8^;`X(^%l2?{SaBDFj}rCx7^zy_LZ6wOwm6_ye%$ zmye%JZRRy6o1joG3KUcyCZFq8?5D$YWa2I6z~p<7M}| z@1Wt{p1Pv$8GkfvQT;7BqZW04X_M8jOh;`;x1O1BVf)CAQnV&4B~T+~WUaKnDc4+if3`(N_LV zq34>{(b_PDU}D7wv>fQt=N)c|=J05HpF5GF$#ezQ`JN6mwwUu%!iy0NGk63mogG92 zA0~}x@lT`Prb6-2m@}x)Z-6rK`3>~TgG_6vq<;^6A9S-koi&8&nip*F?4?mv4bHoh z`7Ef?u5Ic5LINs2t=OJeUXIFEaar3FKB7G9M0Gx{X;jdJ=TrHB8QartI}#ayy_@rV zX*KVH3wmC2;fFXg!Eqeo9`vYXP{bVLZJWC|_dnGFF;NAnd=I#|bQDO<;Bvd=?N7?}_uYMNKva|tXU3D1~H!jfGg;@(#Cjga= zdj?gmIgs*2KZte0{Bmc8rv|Y+6!*MCpMML<0ov=&j{iGifVQ&97&R|zpmp9!fe=<) zw0y4G{}JLubGEGRnwS2e$ugUGf*WdR)RD{HMl%=voy+5ErPV`!(p&MFI^t0eH;LyE zKRI^4QjS(C3Zph_r^BceO4MN{FunQS7Tf3cG?8VT1=U=(bfk>dLglXK_hNzvP=85h 
z^Jc})1}bv)XM4R&h_b)Os{ZbaMa8~{o&zPdDE)}$c}!3g$|Q}Iads0zDYsw$Dvu#Y z2?IEu6IfpSsJl?>!neg^-XEHcpJS)_AH+c$KflYrlNba7>d}euBYdF#P3+uNkp_(6 zyqQN1e}PpxyQSI?sG@QjlUA}&26GLuGXQ-*bfc#6}9LS?n|z!$lK^c@$cLw zK2DT-yToaIb_!*YEBSK1uYX2qRhd5h(&VU6WcpB)V;CiUFIs;z6N=v4u}!5)-9}H_ zQybf)(@^xGfv?t|EOdXhMfg@8=l_UD_c`&*uT2si<2CB)R%z=OfJ-XG#m}n>WLL_a z?r@7?`xT1&oCvpo{+~lkv=1pT8Apb->0xu3r8`WD71_WnZ-`eNvVQ=~AG%fw{>=i@ zNJSro>I~( zKlaeb(~xf!G0SL>5nr0yC4N3WxOmi7^ew4uVBh^N~DhbyG^S#jzH4XFumlZWj z3uWMlSfg*xURTm&DPa5X=h&xQkI`}duB z@^$Vbg*8j|!>pk$h2(PWBa03{abI%&niMnf?sD|T^-d#G3M;4BL+RE*$~J>j(+xb^ zC|mNm@6tV}pwQFH@A8=Qq|h*~9(^F%g0=COlO&Vl_WSMSast(PUO>)Kc_xog=JK+Z7Sr3!fsgl#IzKCmMhS)&nWYdDP%Xr907()tYvvaVd(zvkhcn#51(FCuvJ)Hk@P z$8gE*Q~TTvt&KXkG6YK0KgisOCnsmx`L?-W>3?>mD5FO#g(}=|--!BA3T>AB5&cS9 z3f=jia!c}mDD*3b>rV|5b0amRl@T@)g?|5J?o9I$3ccdpoi(@miFfTgwz=9XP-rzs zi3bD(C{)eWUhU$-_|x^SEHot!zf5Zs7TBL+!J)uz=9d=cl=Z@7x7%Urv1gLbZV>*8 zH-FzsCnsUJ_TrgGEQa{JJu21f)+oB@pRrz0RKSO~rt|+8n4#^w=FL%`0=#|a&mVGA z8I1xO(U%=$P#1AoxpqPw)rO}vWhKK1878lMnaR&IRNgV6Sj$n?MD>r+syC7k}>i{kT}a!wE@xh3oge@gb@qW5GZ#9bxq> zZ+F{-A^5JGqUGFe_;N_An$A=rLtOM&^H3_mnfG@ZEqj9KSyLt_$rd<=s%Sb!q`^Lv z`LLrZ9V|$ZYc<9H`3YR4_ByvU;l)NBzGZOHKDIE&oTuTN2N#KX{jbb)nn@S1wtqMF zpstt?)?1gkN{D$0ss~KzBl&C;sxJe>H4EFZHc>rTai|n)<*G&Ymqf5CR+>gS?T8gF z>mZ>gU$Mv`dAcro0`uWxcj=@yVP;BjYx`^~#?1LR^oJuc@|$_>4BtA2BDsD+ZPE`MZ_8Kr+cAA2Oaj%aH~^27r=L@}wq7p0a!Kukn*l7AW_eAC|a zF{zos>tb?|PSAF^(*J!lotxm)Z7nP-kf4J@p*qhehPw#Y>i+B2hflCN)Y3jzk^$2P zl{Za%ZbSD~>GDK*^G4pSwsE>$9r>^CJ=Fe-SzS;8lM1@s0-W=aGWp*R{9$WLh_QWh3r6_U0USmnOdSjlGLYZovej!}dSca+uEOFtPbD zfa&wMQZ3v=F)iqJwEOEIrmB{IY)#X^IC~pkVp|QyDk_dkdw&f*stWJl z{U-)x^`sp+LR(N;AkC+~%LfIl)8F|YdLoCh(J^ehA2N-O-8*?m2^p=PR(y}!kS5-^ zZ^1GON&Gpn{eR|9kYv)OBcoq|aI)L7b#m(lu+w9*(b&QaYe^P?qfPH% z`9Z!}b%h;9jFIz<7pQUnib=N*ClyZJaTPRS-UI(fU4IsS6}xdPde2V3g*_k-I@9o! 
z5?tv}|FDSJj*Yx)M^v+)wS2n~e^d8Q?#$#P6iJ^2_n-szavF}e-^i?lU^ks?4Kcgx}SA^}gr((+J%$(Iyc1y;4 zru|EcJ%7_^*;!bTFk^v6*KaF)1yQJOIwH=+!H%j5QJ3R)I#H_ndBv@co8a4!xO$~> zkv~3kF`R+my@a&dF4kuu?aZk(hx4sS?3Bp~?H2=ni~&c^T^PBOaw z9#QXbL%ybCU(S6zW2F70Q!WTQn$AoPxmh@s>GVnVufSxB$(=RRJupm8ZoWj*0e!QG zFe&{~oM186cZeTHl9{2*rLP0PVwHKSKp?!|Njns(eS^#qC7raNxsW!Z=tsV!27g=2 zi+`7N8-fAKATk)kaGucnJ(pV< z4d+oYy^G8g8AR+WygtTaYIx==b+&D75`WLSNPnbmJ0ZI)(BzAD9x_9CVt)n`{4WNT zSCN*wxKLBvB4 zR><+}SEvatLBa{=>6oxJNZ+saF67t*cf0)VS2U{NU7*%&7BK~%SjQA|)HyKBk|HdF zsW;aD;o}Qgx#@lzacu>+#QW!xXn)!&8Dq_1g=QE1#webxc-s-P$G2|{uZR10Zuv_1 z?l)5Bq}r^Yp&&{nd-M)!_xjaQJNTp4K0}kf{t3!eR?KzEM)A_QZZ(hA9M8YSFY9zv z;IV4P3kl{*WOX*0H0?`3N_t=hX?rPBtMXUlmDQ1G#zW7}J%j{fn)j)v7=IBa_UADt z=TU^8?&jK;s|W9NwG0I=Ww?nC8T8FQhigIkB=`SvLl}MSlu9b#u0}O-HrfNW88ML| z{L*lx8FxSrH8+1>I zE;fhY$&RHZi|FA*k*Ce{<9}^1IuvY3x@QV`MJe5ed=DI|H2M=2`y z+GaCI@U8brvzo}mQ^F*=q z=P|-azB6by3Km#vEQG!Byj)R;=kPl6K3u%NqW>JZ1|dZkNd!Mldi`42u4vqo zjkb9h%Zd0HCA%sU5^h_Y?#veLM1f&NC+V*ss7>#&o_!ClJ3N2_(+3^#bh?wOLJo-Bm>^;`L zEb;{J^_Y%oWYbm9{T$g6_FWTp9JSUd*(Ky3>b3Qe4oArg9|Z5&3|EqSC+AA1S4!Xn9UckO1UL1U>5DwwH590??0ICRje^XpyA9e1{=nztwNY+1WLxI` z47Dwp97JvF>Ja{@}S@<~~>4p;xo>3Ge9d+quU9`g814XIA(|Zt+dXnC}^f`jo ztc%`XCiV|6p0a9_-EcoTY3bi_2VU#Z+2`Vf;g+aN`gz0;?mr}Mb*x*$mYGvGT4p<( z*lG9o{rL=o;d4oR&zqp(;#NX6ehpflGDC~Kb}$M!n1A^2cNEMNPV9^4ehA8NI3ty5 zC5(mj-6u*|AnC~OxS2Z&2RLptf1oZwxbpHTzKjThBfR`llGYvKJUM5SEnL(Cg1xSDBw;Wh}pvKRL@DSV|d6fUcI_e z2c{K~!L?hrHsTbbmsBSugNNHzPUY+;xC77Vt)*27u6B&T>@acePD)Ae(%lw=_?9;k zo_DVyuDppexGx{KKi>38i#>$MRLA06*=q=@vVZ4cR%}Jk{;HINq(yjsEx62X=>pen zqjOR%KjEBMl76|e9+pq8?4^tO47)tGk+_`Au(~m==D4E*HqWA*yX-n(aAJ_b<-##o zM23hSabN}56BpuWhjE~Ki0522337tm8PjLSpvLc`Y_=={UGwblpWcGlox!kK`{EC1 zIlJzk+)nULR5~b1?GD!$0XJwBli_;X=B5mx2Y+o6 z!VC(Zx54!7@g?9GOp+X_n6BDFJ@jOU*g;Mh6b=S6@i;(7Zh5b3fgMgyhzYIjp#??% z^gk6I`p~tMWsp&R0klT(C@nY+QB z+&WKfZUTkGLc446isU6lsru8_^?#tT5inq_DTm+tuwp-rb{Jh>Wv{W&29?}*o-6m~ zaQN1F-YE%MByKBGqf+oB|GHPvqQYehzO$cy#g3B5KO7_??lx7CNA7KVCP$k>o{3q} z<2xixZs!fzB$#QA8%9)lynL$U8m%M!w81IRbYZ$~!Zb_1FLgqU`THz6w=`8>yS4EX zIZ%`<)#Fjlzf~uXu1d?>)V%l~((`(B0000{00aO402FoK4FCW=00962Q!SHF1t^nW NR160JQ+{_flmLV?7GwYb delta 8554 zcmaKyRZyKx)TMEEcX#Ii3GVI|2=0X7?(pL7PJ%;lcY+<`bl2%o16ow4(X)CMCeI$R;5Ge`LBY2fE4;K;Gg~l z|F>ZI|MO2${BM8aAsG}xPXhv^FwDO*Sr(QD1%vg!Ae@>1?8%X^j5rva|K|dN2LciT zLgF$x5tb-<3pVrr?WPBG6%j7bA!cwOJR#4ty|~lP?)_06I>W{#KsQ9uiivw0?39@x zO`1r<8^QGW1oND4xs+y@gz~f?ElG<}Rk`5|K1tfHo5hy;Nu6#t|C3j*citBrn+v}) zpUs!9MK=*Z5FN7b8c9M9oPf>wHA#DL3xOa8A}r=MAqFbArw;gP{AmE}R`O^DGC*F@}5P40LUndmoW*E4K> zFc&c!@aP>Fn(f<|9f($0nw?9&$f~}}yeYC548sbhTzzkn(JwPj(9PUXJUvWLYt3~5 z_KR?xP~cn=0sz?@bC{bvqpM9mC^(lpeDp+Md;29B9(iMA;XbG_r8KkYXGv#rMRZ#X zK~Ceg2(8$9$j8evT^Qi;y6HBncwj`6@e$fjE+Y4;c`G7xxwQUDf;cAf_VEpe3q}DK zZ7zQgHt1JszXVPoN`7VD`nj<5J$||Sg%PWlNNiaNm*TFC;tYfUQ9E) z79B;f0(P22RFyS3q`4mv^PKMFAvlAH`?&UtEC-53*Tj{VI<$yIMk%6x zmb`?7yeLEWFBudPQg6FSKoqsag?C}0q1^E0WTfSFs;0Ts@x6P`6%J_l_DMW-I2t7e zl}GIWV%FiBq6FDa`g#1%yNtY}Z2a;k<%tgj;3%qr^Q@UwY$clh9Mu6kG0kBU^TtZK zsEw$3y4VSgw5HWC4@)4J+tvb?;7C1jMgA=&84i*(db5IT_L87X-7kj+(@mVIF$fB* z8yR=MR(^uEAR5*29z0escUbH8HM%elO+Df}#~}V6>NXU%RgSB8S*_N(6ZFn>*}>m@ zKui5LSrVDbRw@lQ<L5L7D#WUJ|;b147NXzarR)RVn5FsKr_W;NrAJ=IXsNaP;;M11D`J^`j$-{2z%HlIh%d z^)vK8^&w+<-0>yFZtN$j-kyg3C6oN}fIxPE&O#{Yl8=6nZb9wY&(;JvEMoQ?^~FQZ zD~}ud8Le#*Erm$v{V#_LuRP;gH)s@+;ZL;cj-Ezr()Il*VW(XHmeKFJCo>*ZKzwQ) zKlvD5)pw};qlCRiZSCtH(7}m54s(*iFD=5D0*fNarjJU|#1d;6FAE{H$E$=+yg@>S zTNir2z$U2QmJ3xJ+Cw2hmcyh?yxyt_65)B;1%yZ)L!uUg)v>SE?}EIGB9Xp&N4{jw ztodWC&iuYMjEA7P$7qLxj%f=uAne3hIdHhW<7jAXZGHG||0e1%zkqZ-QsT(s7@gUp 
zc0s2uew0Lcc#3rC8=gV9>2HHL6}H8E+Xglh#Vu0J*M79r_RD-9q*!x@8pizJ5#tI6 zl@X=1tzl=G*yP^Tw^X*jZv~EhS*$^BPSBl$)g`-re`f-U_BpR*Di2vRfsPJGGemC( zPsYXkQZhdbG&by>ki0|&pT9FR=vLgr-q9kw5V69CW1y13yqQeQFcWHZApzz~)CMXd zLIeH^p41@eN)KA8-R?qk>3zB*gIFt1@=!`2KM)a;E;s-DMDT`=fl=;pTiAwmUX

z{`8iv!U{U62zNriPkG=rfOG6I3A$uak@mCpeIQ(ZDBMU*&tLLb%F8K_i3{mqF4f!o zl$x|0&i~T2)NgJiMS9zq@)W61de=iw7W(PSyga>D+!*Ao;>mmeS9Eym5zIJWzYZ9l z6T7IreZqob{ZC}+xYqMD%1?CG&M-WTiZ*m+JZeHqN_0_j*skPXfv$>b^wUHC_OFS* zX3pl&*m#(g^ruzCS;^wI$u4kJ$rsXxD4l1lI?hwPUS)}N*;IKlEKtQay>m=H)6514 zZaLll@+Ag$ieSvUML-kGKfx1eCI`~8hvwF^%YthtHTjH4krPZRqQA?UUU?dvzvtk z7ii*iPqEeIeH4;q0`qSvD3>b+h$Y`?#4fKnTHv{85x!2@)MOpQzyFl1=9`A-8bLZ! zuh0X_2;1A9QGH*POg;Nd_!%YRTI36DA0LiqQ5cjt2S#oWT#(vg} zhVAPJEKcakbFA&qyRcMm=451t+;ONzcf43jMR=!BnV&_jKaxTnfITtD;|es^hJ zo5R-}M(z(<0Y(t|a#DQXc**XCPGsW{&!C*lesgxBq9vqX3vcF1ATl`*?ZC)Ki*aeE zsm!vW+)~G_PLVt#mAFjo;&nHH`G})OQ&F^Qzoz-&lJ-IM>q2kuc`Wrv3wN&8#lxRI zc~(!85J85A&r8^61aI-a+^0=svG887>vmrVBTs7o0`SZj7Qk&bIt6|a$XQ=jcH&SK zg6GcXq4!@0w+M1Y3|bda(aJ71RX{Vrow<3^xC?rg$#rxbqdRSzxHbh6t-e{bd7q)Y$*cT`QudEAM96r=A=q4R(4-A5+Iv0QLXGr=B-Z>ovrl4xv z8L$`&l50PgdaRaHo}H$DE7Lua)z7+$SQPJ@9owp|1)Ex>aKUKyGO;g89HHJSccG#~39lJ#bh?aPn7y zN+t6R#_yzEf7Z;^s5XX=A>) zxDEK;fI$-uQF`JTMr^@M8nL2zyb`bm4g;LMYmGQOnuI~_ZQJ%L8e z&w9U5TkHT8cTGI$4@P&XFi#_ZyR)YP zn7R;!X9#4ec-Rn1xFQ*?^W1Aixbs^AWqi<7ajmjIK4F;gud}B>zh%;Ozo=wds!kt| z+HOy%pD3RovAEbGvof25f&$sK+LY!T;~OF=*1sdiZuq&Q`E-k~?$qaLc;mPNB8bjc zTmo>=K8J9x6YATYqg-kwo!K0Ck(&~43|MrulWVmItigmmqQqX;kQqv3)jl`9M0ngT zj`W;BPr=XQKw2lauRFxXyo`J33ar=Q56s|usbFX%Ls&L-@8h|$ZEd}~7?1IGqC;ODmPV78xj zWnnrCj6O2XA_KI!R--QzAH>>wZ>x)9U9A*|T)D3p!?g?qd}2b`8wC*oS7C1u`j4(J zb-07%VS_eppjf7LRU=;^ed{7TGL@8u!>ZU`>agZ(iQ@G9^51%W6G^wkk1b+qzSS`< zF}-NjhTA2vt2rU}oHKJl&Fx#H4R2zQ$+{WtFVb)HX3)Ox&m3)Zn8=&_Hb3dT84)t< z%k0)XnsA+(X7dJ))c+iB*+&l4*4|=2ftuBT$bpH z&;$^Y1Mx7mrO|q10FX3lAQwkg&8|UrF4l%BXQYCwxA+I72j4RRt)~G;dh&EOoWUgP z!j=2vr71T_#U3L7J=M@{w8@%^S`|co%%z~eeI(fa%!zAYg;v^Om-iVKeJG~#$9jvq zEBo8vZ6v3bJ$wu7R(L}YT92{$&eCxnIY(Eg_|G#BBD}`GG;~d^#sd{5lA2F&mEoz5 zrk(U#dMVK7N-lJ^vpG}CB&LDnCnPJ_=-MVWJ81YK_@8Y7RGQhCKN&ri2MhNvVN+Y} zk8eFkWq$AzJULcH$yCB`Y}51R5<2t*m@Fe}W8xkf+}(mo3&PosLaC0?@SS^GwQ&Z9uVuR#dOnv_@&u|Im8aYWK2V|^en52*!TitQ~5#6Xh=z$9uW*nhMz#ezt?1Dh?rjFCv+nM`RQI=?RS zahpyStFYlBw`WLJ19xKAO51h);U_{vOxIllx-WAOrt-9|XXheyG&QQ�Un*VU*n{nJeHb?}=35sy{5sl&-M3FHFVjj0_=M+DQyB^WsC3L@KB zxiyV8661?J*8%wS0;S7rt3>oZMdewWxqz)9D~8pgcV&nZ-TWMe^Hfo;Np6et^I6y= zyuFn4W{}9b9Z1({f5-~^5`LhgIEvB#54rdU7FbdKhf9e5#}V*~k})7z8tl_hSpR>N z0ro$raPxmdh4ttE0~I8SOU*ZHl0wW&WwHK21ubE5i=2aPF7xsT6sgD<+;1ly@RQnp|+f+`>vXQP+?=~_x;V#4F(5`14jxB3Lj=LZR{uk31l!P zI3bY?i-CiH1c?62gRf8!{%gR(L(hhuD}2n7E!~czh>0XDYt5=X3Fo3WA-P;6iOO1C zax_pR491yNoen}miD=+s5j(ik(tTnv+dwMhB4rMj>$4sgJ$l!i<@vcUzKUK>w`BRC z83QLleU*_jK%Bo_%*J^;X|h&7C&4&m5b)f%@Cl)rfAtGn(ONewWN_P+ zLilj^;pNI=PmqUWa~BIWtF`UXj{V8jsW}yM7`@%2ij?Zo?uVD9P+zx@553&+(W3&) zO?SkjnO=W$;m1*6_qk;|GmBmP2RVZ!q9qfFbRXgk43~u$V>(82_7l%f87pdw#8Jwj zo7M`S6yVq_7T|dn`pB(@K?(c!K#cT(rZT;oqr<>h8Tur^JIKW*!ywc^HPk$y*KOK$ z@`su3{0x7`;qC+Z1DQni{AMdRE~lT7y_6gix$PapWbLeWF2k#zVoA=noJ2b7n?^TD zF@Hb^4u>|rCmIE#!uHJKpo;9qQC;ZLC9!J}@E*Zq=Lw>!tkTbTe1*&gFL&UpXNev! 
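The top of the next hunk is cut off: only the tail of the poisson_low_err definition ("= 0.25 else 0.0") survives, while the poisson_up_err line is intact. As a point of reference, here is a minimal sketch of the two Poisson error helpers, assuming the common approximate 1-sigma counting errors; the sqrt(n - 0.25) form of the lower error is an assumption consistent with the surviving tail, not necessarily the author's exact expression:

import numpy as np

# Approximate asymmetric 1-sigma Poisson uncertainties on an observed count n.
# poisson_up_err mirrors the definition that survives in the hunk below;
# poisson_low_err is an assumed reconstruction and may differ from the original.
poisson_low_err = lambda n: np.sqrt(n - 0.25) if n >= 0.25 else 0.0
poisson_up_err = lambda n: np.sqrt(n + 0.75) + 1
# e.g. poisson_up_err(0) ~ 1.87, poisson_low_err(0) == 0.0

get_abcd_error in the hunk below propagates these asymmetric errors, together with Gaussian sqrt(N) errors on B+D and on C, through the ABCD background estimate F = C*(A+E)/(B+D).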
= 0.25 else 0.0
+poisson_up_err = lambda n : np.sqrt(n+0.75) + 1
+def get_abcd_error(A, B, C, D, E):
+
+    sigma_AE_up = poisson_up_err(A+E)
+    sigma_AE_low = poisson_low_err(A+E)
+
+    sigma_BD = math.sqrt(B+D)
+    sigma_C = math.sqrt(C)
+
+    # Calculate the partial derivatives
+    partial_AE = C / (B + D)
+    partial_BD = -((A + E) * C) / ((B + D) ** 2)
+    partial_C = (A + E) / (B + D)
+
+    # Calculate the propagated uncertainty
+    sigma_F_up = math.sqrt(
+        (partial_AE * sigma_AE_up) ** 2 +
+        (partial_BD * sigma_BD) ** 2 +
+        (partial_C * sigma_C) ** 2
+    )
+
+    sigma_F_low = math.sqrt(
+        (partial_AE * sigma_AE_low) ** 2 +
+        (partial_BD * sigma_BD) ** 2 +
+        (partial_C * sigma_C) ** 2
+    )
+    return sigma_F_up, sigma_F_low
+
+def calc_exp_bkg(A, B, C, D, E):
+    #If A+E is 0, set A+E to a small value based on its Poisson errors.
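+    #(the 0.4 floor keeps the background estimate non-zero when both sidebands above the min-z0 cut are empty)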
+ #A poisson with mean=0.4 will throw 0 64% of the time + if A+E < 0.4: + A = 0.4 + E = 0.0 + exp_bkg = C*((A+E)/(B+D)) + counts = [A, B, C, D, E] + return exp_bkg, counts + +def run_abcd_method(data, signal_mass): + + #Get the search window + mass_low = signal_mass - signalProcessor.mass_resolution(signal_mass)*nsigma + mass_high = signal_mass + signalProcessor.mass_resolution(signal_mass)*nsigma + left_xlow = signal_mass - signalProcessor.mass_resolution(signal_mass)*left_nsigma + right_xhigh = signal_mass + signalProcessor.mass_resolution(signal_mass)*right_nsigma + print(f'Left:{left_xlow}-{mass_low} | Search:{mass_low}-{mass_high} | Right:{mass_high}-{right_xhigh}') + + #Signal mass window + mass_sel = {} + mass_sel[f'search_window'] = (data.unc_vtx_mass * 1000. <= mass_high) & (data.unc_vtx_mass * 1000. >= mass_low) + mass_sel[f'left_sideband'] = (data.unc_vtx_mass * 1000. <= mass_low) & (data.unc_vtx_mass * 1000. >= left_xlow) + mass_sel[f'right_sideband'] = (data.unc_vtx_mass * 1000. <= right_xhigh) & (data.unc_vtx_mass * 1000. >= mass_high) + + #Fill z0 distributions for each region + min_z0_h = ( + hist.Hist.new + .StrCategory(list(mass_sel.keys()), name='mass selection') + .Reg(400,-0.005,2.995,label='Vtx Min Z0 [mm]') + .Double() + ) + min_z0_h.fill(f'search_window', data[mass_sel[f'search_window']].unc_vtx_min_z0,weight=data[mass_sel[f'search_window']].weight ) + min_z0_h.fill(f'left_sideband', data[mass_sel[f'left_sideband']].unc_vtx_min_z0, weight=data[mass_sel[f'left_sideband']].weight) + min_z0_h.fill(f'right_sideband', data[mass_sel[f'right_sideband']].unc_vtx_min_z0, weight=data[mass_sel[f'right_sideband']].weight) + minz0_coeffs = signalProcessor.get_minz0_cut() + min_z0_cut = signalProcessor.polynomial(minz0_coeffs[0],minz0_coeffs[1],minz0_coeffs[2])(signal_mass) + print('min_z0_cut: ', min_z0_cut) + #Determine the min z0 cut floor. The ratio of potential signal to background in region C should be so small + #as to be negligible, or else the expected background in region F will be overestimated due to signal contamination in C, + #and our ability to make a discovery will be dramatically reduced. 
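+    #The scan below slides the floor down from two bins under the min-z0 cut:
+    #while the integrated search-window yield between xmax and the cut is below
+    #z0_floor_threshold, step xmax down one bin; stop once the threshold is reached
+    #(or xmax reaches zero). The resulting z0_floor is the lower edge of regions B, C and D.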
+ xwidth = min_z0_h[f'search_window',:].axes[0].widths[0] + xmax = min_z0_cut - 2*xwidth + threshold = z0_floor_threshold + while xmax > 0.0: + sig_int = min_z0_h[f'search_window',:][hist.loc(xmax):hist.loc(min_z0_cut):sum] + if sig_int < threshold: + xmax = xmax - xwidth + else: + break + z0_floor = round(xmax,2) + + B = min_z0_h[f'left_sideband',:][hist.loc(z0_floor):hist.loc(min_z0_cut):sum] + A = min_z0_h[f'left_sideband',:][hist.loc(min_z0_cut)+1::sum] + D = min_z0_h[f'right_sideband',:][hist.loc(z0_floor):hist.loc(min_z0_cut):sum] + E = min_z0_h[f'right_sideband',:][hist.loc(min_z0_cut)+1::sum] + C = min_z0_h[f'search_window',:][hist.loc(z0_floor):hist.loc(min_z0_cut):sum] + counts = [A, B, C, D, E] + bkg_error = get_abcd_error(A, B, C, D, E) + expected_bkg, counts = calc_exp_bkg(A, B, C, D, E) + + #Get number of observed events + #tight_sel = signalProcessor.tight_selection(data, signal_mass, case=1) + + print('final sel') + minz0_sel = signalProcessor.minz0_sel(data) + masswindow_sel = signalProcessor.mass_sel(data, signal_mass) + final_sel = np.logical_and.reduce([masswindow_sel, minz0_sel]) + nobs = ak.sum(data[final_sel].weight) + + min_z0_h.reset() + return expected_bkg, nobs, bkg_error, counts + +####################################### P-Value Functions ########################################################################### +def get_t0(A, B, C, D, E, ntrials=100000): + + # Include background estimate statistical uncertainty in p-value calculation + # Sample three parent distributions + # Gaussian for (B+D) + # Gaussian for C + # Poisson for (A+E) + # Calculate expected background F. This is the mean of the expected background parent distribution, Poisson with mean F + # Sample the F parent distribution to measure the test statistic t0 + # Build test statistic distribution + + t0_distribution = ( + hist.Hist.new + .Reg(500, 0.0, 500.0, label='Expected Background Toy MC Trials') + .Double() + ) + + # Vectorized sampling + A_E_s = np.random.poisson(lam=(A+E), size=ntrials) + B_D_s = np.random.normal(loc=(B+D), scale=np.sqrt(B+D), size=ntrials) + C_s = np.random.normal(loc=C, scale=np.sqrt(C), size=ntrials) + + # Calculate F and t0 for all trials + F = (A_E_s / B_D_s) * C_s + t0 = np.random.poisson(lam=F) + + # Fill histogram + t0_distribution.fill(t0) + + return t0_distribution + +def get_pvalue(test_stat_h, nobs): + + #Get the number of events >= nobs + try: + nover = test_stat_h[hist.loc(nobs)::sum] + except: + nover = 0.0 + print('LOOK. 
NOVER = ', nover) + + #make a numerator and denominator histogram, convert to ROOT to get correct Poisson errors, then divide into TGraphAsymm + numer_h = ( + hist.Hist.new + .Reg(1, 0.0, 1.1, label='Events past nobs') + .Double() + ) + numer_h.fill(np.ones(int(nover))) + + denom_h = ( + hist.Hist.new + .Reg(1, 0.0, 1.1, label='Total events thrown') + .Double() + ) + denom_h.fill(np.ones(int(test_stat_h[::sum]))) + test_stat_h.reset() + + #convert to ROOT to get exact poisson errors + histos = [numer_h, denom_h] + uproot_file = uproot.recreate(f'tmp_cnv_histos.root') + for i, histo in enumerate(histos): + uproot_file[f'histo_{i}'] = histo + uproot_file.close() + infile = r.TFile(f'tmp_cnv_histos.root',"READ") + for i, histo in enumerate(histos): + histos[i] = copy.deepcopy(infile.Get(f'histo_{i}')) + infile.Close() + + #Divide number past versus number generated to get clopper pearson errors + histos[0].SetBinErrorOption(1) + histos[1].SetBinErrorOption(1) + result_g = r.TGraphAsymmErrors() + result_g.Divide(histos[0], histos[1], opt="cp") + + numer_h.reset() + denom_h.reset() + + mean = result_g.GetY()[0] + up_err = result_g.GetErrorYhigh(0) + low_err = result_g.GetErrorYlow(0) + + return mean, low_err, up_err + +#################################################################################################################################### +#Loop over mass and signal +inv_masses = np.array([x for x in range(inv_mass_range[0], inv_mass_range[-1])]) + +exp_bkg_mev=[] +nobs_mev=[] +bkg_uperror_mev = [] +bkg_lowerror_mev = [] +abcd_counts_mev = [] +pvalue_mev = [] +pvalue_uperr_mev = [] +pvalue_lowerr_mev = [] + +#Set signal/control region +if not args.tenpct and args.highPsum: + psum_sel = signalProcessor.psum_sel(data, case='cr') +elif args.tenpct and not args.highPsum: + psum_sel = signalProcessor.psum_sel(data, case='sr') +elif args.tenpct and args.highPsum: + psum_sel = signalProcessor.psum_sel(data, case='cr') +else: + print('Error. 
Cannot access that region yet') + +#get the Tight selection, without mass and minz0 cuts +#init_sel = signalProcessor.tight_selection(data, 0.0, case=3) +zcut_sel = signalProcessor.zcut_sel(data) +vprojsig_sel = signalProcessor.vprojsig_sel(data) +initial_sel = np.logical_and.reduce([zcut_sel, vprojsig_sel, psum_sel]) +print(initial_sel) +print(np.max(data[initial_sel].unc_vtx_min_z0)) + +#Loop over invariant mass range +for m,mass in enumerate(inv_masses): + print(f'Running Signal Mass Window Center {mass}') + sel = zcut_sel & vprojsig_sel & psum_sel + print('sel') + exp_bkg, nobs, bkg_error, counts = run_abcd_method(data[initial_sel], mass) + bkg_lowerror_mev.append(bkg_error[1]) + bkg_uperror_mev.append(bkg_error[0]) + exp_bkg_mev.append(exp_bkg) + nobs_mev.append(nobs) + abcd_counts_mev.append(counts) + print(f'background estimate: {exp_bkg} | nobs: {nobs} | counts: {counts} | bkg error: {bkg_error}') + + #Calculate the p-value by building the test statistic distribution t0 + t0_distr_h = get_t0(counts[0], counts[1], counts[2], counts[3], counts[4], ntrials=t0_trials) + pmean, plow_err, pup_err = get_pvalue(t0_distr_h, nobs) + print('pvalue: ', pmean) + pvalue_uperr_mev.append(pup_err) + pvalue_lowerr_mev.append(plow_err) + pvalue_mev.append(pmean) + +#cnv results to numpy floats +inv_masses = np.array(inv_masses, dtype=float) +exp_bkg_mev = np.array(exp_bkg_mev, dtype=float) +nobs_mev = np.array(nobs_mev, dtype=float) +bkg_uperror_mev = np.array(bkg_uperror_mev, dtype=float) +bkg_lowerror_mev = np.array(bkg_lowerror_mev, dtype=float) +pvalue_mev = np.array(pvalue_mev , dtype=float) +pvalue_uperr_mev = np.array(pvalue_uperr_mev, dtype=float) +pvalue_lowerr_mev = np.array(pvalue_lowerr_mev, dtype=float) + + +expected_bkg_g = r.TGraphAsymmErrors(len(inv_masses), inv_masses, exp_bkg_mev, np.zeros(len(inv_masses)), np.zeros(len(inv_masses)), bkg_lowerror_mev, bkg_uperror_mev) +expected_bkg_g.SetName('expected_background') +expected_bkg_g.SetTitle('Expected Background;Vd Invariant Mass [MeV]; Events') + +nobs_g = r.TGraph(len(inv_masses), inv_masses, nobs_mev) +nobs_g.SetName('Nobs') +nobs_g.SetTitle('Observed;Vd Invariant Mass [MeV]; Events') + +pvalue_g = r.TGraphAsymmErrors(len(inv_masses), inv_masses, pvalue_mev, np.zeros(len(inv_masses)), np.zeros(len(inv_masses)), pvalue_lowerr_mev, pvalue_uperr_mev) +pvalue_g.SetName('local_pvalue') +pvalue_g.SetTitle('Local P-Value;Vd Invariant Mass [MeV]; local p-value') + +#look elsewhere effect +avg_resolution = np.average(np.array([signalProcessor.mass_resolution(x) for x in inv_masses])) +look_elsewhere = np.array((inv_masses[-1] - inv_masses[0])/avg_resolution) +print(f'Average mass resolution: {avg_resolution}') +print(f'Look elsewhere effect: {look_elsewhere}') + +pvalue_global_g = r.TGraphAsymmErrors(len(inv_masses), inv_masses, pvalue_mev*look_elsewhere, np.zeros(len(inv_masses)), np.zeros(len(inv_masses)), pvalue_lowerr_mev*look_elsewhere, pvalue_uperr_mev*look_elsewhere) +pvalue_global_g.SetName('global_pvalue') +pvalue_global_g.SetTitle('Global P-Value;Vd Invariant Mass [MeV]; global p-value') + + +outfile = r.TFile(f'{outfilename}.root', "RECREATE") +outfile.cd() +expected_bkg_g.Write() +nobs_g.Write() +pvalue_g.Write() +pvalue_global_g.Write() + +outfile.Close() + +thresholds = [] +thresholds_lew = [] +from scipy.stats import norm +for nsigma in [1,2,3,4,5]: + gaus_cdf = norm.cdf(nsigma) + threshold = (1.0 - gaus_cdf)/look_elsewhere + thresholds_lew.append(threshold) + thresholds.append((1.0 - gaus_cdf)) + +print('Local Nsigma 
thresholds: ', thresholds) +print('Global LE thresholds: ', thresholds_lew) + + diff --git a/plotUtils/simps/simp_signal_2016.py b/plotUtils/simps/simp_signal_2016.py index 5996569fe..5d8cef659 100644 --- a/plotUtils/simps/simp_signal_2016.py +++ b/plotUtils/simps/simp_signal_2016.py @@ -15,8 +15,7 @@ from simp_theory_equations import SimpEquations as simpeqs class SignalProcessor: - def __init__(self, outfilename='expected_signal_output.root', mpifpi=4*np.pi, nsigma=2.0): - self.outfilename = outfilename + def __init__(self, mpifpi=4*np.pi, nsigma=1.5): self.nsigma = nsigma #SIMP parameters @@ -159,19 +158,84 @@ def get_exp_sig_eps2(self, signal_mass, signal_array, eps2): return signal_array - def tight_selection(self, array, signal_mass): - p0 = 1.07620094e+00 - p1 = -7.44533811e-03 - p2 = 1.58745903e-05 + @staticmethod + def get_minz0_cut(): + coeffs = [1.07620094e+00 + 0.1, -7.44533811e-03, 1.58745903e-05] + return coeffs + + def minz0_sel(self,array): + coeffs = self.get_minz0_cut() + p0 = coeffs[0] + p1 = coeffs[1] + p2 = coeffs[2] + sel = ( + ( array.unc_vtx_min_z0 > (p0 + p1*array.unc_vtx_mass*1000 + (p2*np.square(array.unc_vtx_mass*1000.))) ) + ) + return sel + + def mass_sel(self,array, signal_mass): mass_low = signal_mass - self.nsigma*self.mass_resolution(signal_mass) mass_high = signal_mass + self.nsigma*self.mass_resolution(signal_mass) - print(f'Signal Mass Window: {mass_low} - {mass_high} MeV') sel = ( - ( array.unc_vtx_min_z0 > (p0 + p1*array.unc_vtx_mass*1000 + (p2*np.square(array.unc_vtx_mass*1000.))) ) & - ( array.unc_vtx_mass*1000. >= {mass_low}) & (array.unc_vtx_mass*1000. <= {mass_high}) & - (array.unc_vtx_proj_sig < 2) & (array.unc_vtx_z > -4.3) & (array.unc_vtx_psum > 1.0) & (array.unc_vtx_psum < 1.9) + ( array.unc_vtx_mass*1000. >= {mass_low}) & (array.unc_vtx_mass*1000. <= {mass_high}) + ) + return sel + + @staticmethod + def psum_sel(array, case='sr'): + if case == 'sr': + sel = ( + (array.unc_vtx_psum > 1.0) & (array.unc_vtx_psum < 1.9) + ) + elif case == 'cr': + sel = ( + (array.unc_vtx_psum > 1.9) & (array.unc_vtx_psum < 2.4) + ) + else: + sel = () + return sel + + @staticmethod + def vprojsig_sel(array): + sel = ( + (array.unc_vtx_proj_sig < 2) ) return sel + + @staticmethod + def zcut_sel(array): + sel = ( + (array.unc_vtx_z > -4.3) + ) + return sel + + + def tight_selection(self, array, signal_mass, case=1): + coeffs = self.get_minz0_cut() + p0 = coeffs[0] + p1 = coeffs[1] + p2 = coeffs[2] + mass_low = signal_mass - self.nsigma*self.mass_resolution(signal_mass) + mass_high = signal_mass + self.nsigma*self.mass_resolution(signal_mass) + + if case == 1: #full tight analysis selection + sel = ( + ( array.unc_vtx_min_z0 > (p0 + p1*array.unc_vtx_mass*1000 + (p2*np.square(array.unc_vtx_mass*1000.))) ) & + ( array.unc_vtx_mass*1000. >= {mass_low}) & (array.unc_vtx_mass*1000. <= {mass_high}) & + (array.unc_vtx_proj_sig < 2) & (array.unc_vtx_z > -4.3) & (array.unc_vtx_psum > 1.0) & (array.unc_vtx_psum < 1.9) + ) + if case == 2: #tight selection without minz0 cut + sel = ( + ( array.unc_vtx_mass*1000. >= {mass_low}) & (array.unc_vtx_mass*1000. 
<= {mass_high}) & + (array.unc_vtx_proj_sig < 2) & (array.unc_vtx_z > -4.3) & (array.unc_vtx_psum > 1.0) & (array.unc_vtx_psum < 1.9) + ) + + if case == 3: #tight selection without mass and minz0 cut + sel = ( + (array.unc_vtx_proj_sig < 2) & (array.unc_vtx_z > -4.3) & (array.unc_vtx_psum > 1.0) & (array.unc_vtx_psum < 1.9) + ) + + return sel if __name__ == '__main__': @@ -190,7 +254,7 @@ def tight_selection(self, array, signal_mass): #Create MC signal analysis tuple processor print('Initialize signal processor') - processor = SignalProcessor(outfilename='expected_signal_output.root', mpifpi=mpifpi, nsigma=nsigma) + processor = SignalProcessor(mpifpi=mpifpi, nsigma=nsigma) #Set the differential radiative trident rate lookup table used to scale expected signal print('Load lookup table') @@ -250,9 +314,9 @@ def tight_selection(self, array, signal_mass): expected_signal_vd_h.fill(signal_mass, logeps2_range[l], weight=total_yield) expected_signal_ap_h.fill(signal_mass*processor.mass_ratio_ap_to_vd, logeps2_range[l], weight=total_yield) -outfile = uproot.recreate(outfilename) -outfile['expected_signal_vd_h'] = expected_signal_vd_h -outfile['expected_signal_ap_h'] = expected_signal_ap_h + outfile = uproot.recreate(outfilename) + outfile['expected_signal_vd_h'] = expected_signal_vd_h + outfile['expected_signal_ap_h'] = expected_signal_ap_h From ae1080ca08c6443eee5ec5b6a30b8ff8241e6431 Mon Sep 17 00:00:00 2001 From: Alic Shen Spellman Date: Wed, 24 Jul 2024 09:49:52 -0700 Subject: [PATCH 16/27] backup simp analysis scripts --- plotUtils/simps/run_opt_interval.py | 318 ++++++++++++++++++++++++++++ plotUtils/simps/simp_signal_2016.py | 49 ++++- 2 files changed, 365 insertions(+), 2 deletions(-) create mode 100644 plotUtils/simps/run_opt_interval.py diff --git a/plotUtils/simps/run_opt_interval.py b/plotUtils/simps/run_opt_interval.py new file mode 100644 index 000000000..380b257a9 --- /dev/null +++ b/plotUtils/simps/run_opt_interval.py @@ -0,0 +1,318 @@ +#!/usr/bin/python3 +import os +import awkward as ak +import numpy as np +import hist +from hist import Hist +import uproot +import math +import ROOT as r +import simp_signal_2016 +from simp_theory_equations import SimpEquations as simpeqs +import copy +import pickle + +############################################################################################# +def kLargestIntervals(list_of_energies, spectrumCDF = lambda x: x): + """ + Returns a list of the sizes of the K-largest intervals in that run according to the energy spectrum (given as a CDF). + That is, kLargestIntervals(...)[i] is the size of the largest interval containing i events, where ‘largest’ is defined above. 
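+    ('largest' here is measured in cumulant space: an interval's size is the fraction of the expected-signal CDF it spans, i.e. a gap between the transformed energies.)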
+ + * Transform energies to energy cumulants + * Add events at 0 and 1 + * Foreach k, compute interval sizes, take max + """ + answer = {} + list_of_energies.sort() + energy_cumulants = spectrumCDF(list_of_energies) + for i,interval_size in enumerate(range(len(energy_cumulants))): + if (1 + interval_size) >= len(energy_cumulants): + continue + + temp_data = energy_cumulants.copy() + gap_sizes = temp_data[(1+interval_size):] - temp_data[0:-1*(1 + interval_size)] + + answer[interval_size] = np.max(gap_sizes) + return answer +########################################################################################### + +import argparse +parser = argparse.ArgumentParser(description='Process some inputs.') +parser.add_argument('--outfilename', type=str, default='oim_results') +parser.add_argument('--tenpct', type=int, default=1) +parser.add_argument('--highPsum', type=int, default=0) +parser.add_argument('--mpifpi', type=float, default=4*np.pi) +parser.add_argument('--signal_sf', type=float, default=1.0) +parser.add_argument('--nsigma', type=float, default=1.5) + +args = parser.parse_args() +outfilename = args.outfilename +mpifpi = args.mpifpi +nsigma = args.nsigma +signal_sf = args.signal_sf +print(f'Search Window Size: +-', nsigma) + +#Initialize Signal Processor +signalProcessor = simp_signal_2016.SignalProcessor(mpifpi=mpifpi, nsigma=nsigma) + +#Load Data +data = ak.Array([]) +if args.tenpct: + outfilename = f'{outfilename}_10pct' + #Load 10% data signal region + inv_mass_range = (30,124) + print('Loading 10% Data') + branches = ["unc_vtx_mass","unc_vtx_psum", "unc_vtx_ele_track_z0", "unc_vtx_pos_track_z0", "unc_vtx_z", "unc_vtx_proj_sig"] + infile = '/sdf/group/hps/user-data/alspellm/2016/data/full_hadd_blpass4c_ana.root' + selection = 'vtxana_Tight_2016_simp_reach_SR' + data = signalProcessor.load_data(infile,selection, expressions=branches) + data['weight'] = 1.0 + +else: + outfilename = f'{outfilename}_100pct' + #Load 100% data + print('Loading 100% Data') + inv_mass_range = (30,200) + branches = ["unc_vtx_mass","unc_vtx_psum", "unc_vtx_ele_track_z0", "unc_vtx_pos_track_z0", "unc_vtx_z", "unc_vtx_proj_sig"] + indir = '/fs/ddn/sdf/group/hps/users/alspellm/data_storage/pass4kf/pass4kf_ana_20240513' + #If high psum, can look at all masses + if args.highPsum: + selection = 'vtxana_Tight_2016_simp_reach_CR' + mass_safety = 'unc_vtx_mass*1000. >= 0' + else: + selection = 'vtxana_Tight_2016_simp_reach_SR' + inv_mass_range = (135,200) + mass_safety = 'unc_vtx_mass*1000. > 135' #CANT LOOK BELOW THIS MASS UNTIL UNBLINDING! 
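+    #Concatenate the per-run files into a single array; the mass_safety cut is applied at load time via cut_expression.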
+ + for filename in sorted(os.listdir(indir)): + if not filename.endswith('.root'): + continue + run = filename.split('_')[4] + print('Loading Run ', run) + infile = os.path.join(indir,filename) + data = ak.concatenate([data, signalProcessor.load_data(infile, selection, cut_expression=mass_safety, expressions=branches)]) + data['weight'] = 1.0 + + +#Set the differential radiative trident rate lookup table used to scale expected signal +print('Load lookup table') +cr_data = '/sdf/group/hps/user-data/alspellm/2016/data/hadd_BLPass4c_1959files.root' +preselection = "vtxana_Tight_nocuts" +signal_mass_range = [x for x in range(20,130,1)] +signalProcessor.set_diff_prod_lut(cr_data, preselection, signal_mass_range) + +#Initialize the range of epsilon2 +#masses = [x for x in range(50,56,2)] +masses = [x for x in range(inv_mass_range[0], inv_mass_range[-1]+2,2)] +ap_masses = [round(x*signalProcessor.mass_ratio_ap_to_vd,1) for x in masses] +eps2_range = np.logspace(-4.0,-8.0,num=100) +logeps2_range = np.log10(eps2_range) +min_eps = min(np.log10(eps2_range)) +max_eps = max(np.log10(eps2_range)) +num_bins = len(eps2_range) + +#make histos to store results +total_yield_h = ( + hist.Hist.new + .Reg(len(masses), np.min(masses),np.max(masses),label='Invariant Mass [MeV]') + .Reg(len(eps2_range), min_eps,max_eps,label=r'$log10(\epsilon^2)$') + .Double() +) +excluded_signal_h = ( + hist.Hist.new + .Reg(len(masses), np.min(masses),np.max(masses),label='Invariant Mass [MeV]') + .Reg(len(eps2_range), min_eps,max_eps,label=r'$log10(\epsilon^2)$') + .Double() +) +sensitivity_h = ( + hist.Hist.new + .Reg(len(masses), np.min(masses),np.max(masses),label='Invariant Mass [MeV]') + .Reg(len(eps2_range), min_eps,max_eps,label=r'$log10(\epsilon^2)$') + .Double() +) + +####################################################################################################################################### + +#Load lookup table +with open('interval_ntrials_10000.p', 'rb') as f: + # Load the object from the pickle file + lookupTable = pickle.load(f) + +for signal_mass in masses: + print(f'Signal Mass {signal_mass}') + + #Set signal window + mass_low = signal_mass - signalProcessor.mass_resolution(signal_mass)*nsigma + mass_high = signal_mass + signalProcessor.mass_resolution(signal_mass)*nsigma + + #Build the selection for data + zcut_sel = signalProcessor.zcut_sel(data) + vprojsig_sel = signalProcessor.vprojsig_sel(data) + minz0_sel = signalProcessor.minz0_sel(data) + masswindow_sel = signalProcessor.mass_sel(data, signal_mass) + #Set signal/control region + if not args.tenpct and args.highPsum: + psum_sel = signalProcessor.psum_sel(data, case='cr') + elif args.tenpct and not args.highPsum: + psum_sel = signalProcessor.psum_sel(data, case='sr') + elif args.tenpct and args.highPsum: + psum_sel = signalProcessor.psum_sel(data, case='cr') + else: + print('Error. 
Cannot access that region yet') + tight_sel = np.logical_and.reduce([zcut_sel, vprojsig_sel, psum_sel, minz0_sel, masswindow_sel]) + #tight_sel = np.logical_and.reduce([zcut_sel, vprojsig_sel, psum_sel, masswindow_sel]) + data_z = data[tight_sel].unc_vtx_z + print(data_z) + + #Load MC Signal + indir = '/sdf/group/hps/user-data/alspellm/2016/simp_mc/pass4b/beam/smeared' + signal_pre_readout_path = lambda mass: f'/sdf/group/hps/user-data/alspellm/2016/simp_mc/pass4b/nobeam/mass_{mass}_simp_2pt3_slic_hadd_ana.root' + signal_path = lambda mass: f'{indir}/mass_{mass}_hadd-simp-beam_ana_smeared_corr.root' + signal_selection = 'vtxana_radMatchTight_2016_simp_SR_analysis' + + #Get the total signal yield as a function of eps2 + total_yield_per_epsilon2 = signalProcessor.total_signal_production_per_epsilon2(signal_mass) + print('Total Yield Per eps2: ', total_yield_per_epsilon2) + + print('Load Signal ', signal_path(signal_mass)) + signal = signalProcessor.load_signal(signal_path(signal_mass), signal_pre_readout_path(signal_mass), signal_mass, signal_selection) + + #Build the selection for signal + zcut_sel = signalProcessor.zcut_sel(signal) + vprojsig_sel = signalProcessor.vprojsig_sel(signal) + minz0_sel = signalProcessor.minz0_sel(signal) + masswindow_sel = signalProcessor.mass_sel(signal, signal_mass) + #Set signal/control region + if not args.tenpct and args.highPsum: + psum_sel = signalProcessor.psum_sel(signal, case='cr') + elif args.tenpct and not args.highPsum: + psum_sel = signalProcessor.psum_sel(signal, case='sr') + elif args.tenpct and args.highPsum: + psum_sel = signalProcessor.psum_sel(signal, case='cr') + else: + print('Error. Cannot access that region yet') + tight_sel = np.logical_and.reduce([zcut_sel, vprojsig_sel, psum_sel, minz0_sel, masswindow_sel]) + signal = signal[tight_sel] + + #Loop over eps2 values and reweight the signal + print('Looping over eps2') + for i, eps2 in enumerate(eps2_range): + signal = signalProcessor.get_exp_sig_eps2(signal_mass, signal, eps2) + total_yield = ak.sum(signal['reweighted_accxEff'])*total_yield_per_epsilon2*eps2 + if i%20 == 0: + print(f'eps2 = {eps2}') + print(total_yield) + + #Make signal efficiency in recon z + exp_sig_eff_z = ( + hist.Hist.new + .Reg(140, -40.0,100.0,label=r'Recon z [mm]') + .Double() + ) + exp_sig_eff_z.fill(signal.unc_vtx_z, weight=signal.reweighted_accxEff*total_yield_per_epsilon2*eps2) + + #Convert the data to a uniform distribution in recon z, according to the expected signal distribution + data_uniform_z = ( + hist.Hist.new + .Reg(101, -0.005,1.005,label=r'Recon z [mm]') + .Double() + ) + + dataArray = np.zeros(len(data_z)+2) + dataArray[0] = 0.0 + for k in range (0, len(data_z)): + thisX = data_z[k] + dataArray[k+1] = total_yield - exp_sig_eff_z[hist.loc(thisX)::sum] + + dataArray[len(data_z)+1] = total_yield + dataArray = dataArray/total_yield + dataArray = np.nan_to_num(dataArray, nan=1.0) + dataArray[0] = 0.0 + dataArray.sort() + data_uniform_z.fill(dataArray) + + kints = kLargestIntervals(dataArray) + + #Loop through lookup table to find confidence level + mu_90p = 99999.9 + k_90p = -1 + conf_90p = -1.0 + + for i,mu in enumerate(sorted(lookupTable.keys())): + best_k = -1 + best_conf = -1.0 + for k in sorted(lookupTable[mu].keys()): + if k > len(kints)-1: + break + x = np.max(kints[k]) + conf = np.where(lookupTable[mu][k] < x)[0].size / (10000) + if conf > best_conf: + best_k = k + best_conf = conf + #mu_v_eps2_hh.fill(eps2, mu, weight=best_conf) + if best_conf >= 0.9: + mu_90p = mu + k_90p = best_k + conf_90p = 
best_conf + #print(f'90% confidence upper limit on mu={mu_90p}, when k={k_90p}') + #print(f'Confidence level: ', conf_90p) + break + + #Fill histogram results + total_yield_h.fill(signal_mass, np.log10(eps2), weight=total_yield) + excluded_signal_h.fill(signal_mass, np.log10(eps2), weight=mu_90p) + sensitivity_h.fill(signal_mass, np.log10(eps2), weight=(total_yield/mu_90p)) + +outfile = uproot.recreate(f'{outfilename}.root') +outfile['total_yield_h'] = total_yield_h +outfile['excluded_signal_h'] = excluded_signal_h +outfile['sensitivity_h'] = sensitivity_h + +''' +#Get sensitivity contour +sens = sensitivity_h.values() +print(sens) +indices = np.where(sens >= 0.1) +print(indices) +if len(indices) > 0: + massx = + x = np.array(sensitivity_h.axes[0].centers[indices[0]]) + y = np.array(sensitivity_h.axes[1].centers[indices[1]]) + #get the contour + cupper = [] + clower = [] + for i in set(x): + cupper = y[np.where(x==i)[0][-1]] + clower = y[np.where(x==i)[0][0]] + cupper = cupper + list(reversed(clower)) + cupper[0] + + contour_g = r.TGraph(len(x), np.array(cupper,dtype=float), np.array(clower,dtype=float)) + contour_g.SetName('exclusion_contour_g') + contour_g.SetTitle('Exclusion Contour;Invariant Mass [MeV];#epsilon^{2}') + outfile['contour_g'] = contour_g +''' + +''' +fig, ax = plt.subplots(figsize=(30,20)) +total_yield_h.plot() +#plt.xlim(20.0, 126.0) +#plt.ylim(-10.8, -3.8) +plt.show() + +fig, ax = plt.subplots(figsize=(30,20)) +excluded_signal_h.plot() +#plt.xlim(20.0, 126.0) +#plt.ylim(-10.8, -3.8) +#plt.show() + +fig, ax = plt.subplots(figsize=(30,20)) +sensitivity_h.plot(cmin=1.0) +#plt.xlim(20.0, 126.0) +#plt.ylim(-10.8, -3.8) +plt.show() +''' + + + + + diff --git a/plotUtils/simps/simp_signal_2016.py b/plotUtils/simps/simp_signal_2016.py index 5d8cef659..6214b0991 100644 --- a/plotUtils/simps/simp_signal_2016.py +++ b/plotUtils/simps/simp_signal_2016.py @@ -7,6 +7,7 @@ import math import ROOT as r import matplotlib as mpl +import copy import mplhep import matplotlib.pyplot as plt from matplotlib.backends.backend_pdf import PdfPages @@ -201,6 +202,13 @@ def vprojsig_sel(array): (array.unc_vtx_proj_sig < 2) ) return sel + + @staticmethod + def sameside_z0_cut(array): + sel = ( + (-1.*(array.unc_vtx_ele_track_z0*array.unc_vtx_pos_track_z0) > 0) + ) + return sel @staticmethod def zcut_sel(array): @@ -237,6 +245,37 @@ def tight_selection(self, array, signal_mass, case=1): return sel + @staticmethod + def readROOTHisto(infilename, histoname): + infile = r.TFile(f'{infilename}',"READ") + histo = copy.deepcopy(infile.Get(f'{histoname}')) + infile.Close() + return histo + + + @staticmethod + def cnvHistosToROOT(histos=[], tempname='temporary_uproot'): + return_histos = [] + uproot_file = uproot.recreate(f'trash_{tempname}.root') + for i, histo in enumerate(histos): + uproot_file[f'histo_{i}'] = histo + uproot_file.close() + infile = r.TFile(f'trash_{tempname}.root',"READ") + for i, histo in enumerate(histos): + return_histos.append(copy.deepcopy(infile.Get(f'histo_{i}'))) + infile.Close() + return return_histos + + @staticmethod + def cnvHistoToROOT(histo, tempname='temporary_uproot'): + uproot_file = uproot.recreate(f'trash_{tempname}.root') + uproot_file['histogram'] = histo + uproot_file.close() + infile = r.TFile(f'trash_{tempname}.root',"READ") + root_hist = (copy.deepcopy(infile.Get(f'histogram'))) + infile.Close() + return root_hist + if __name__ == '__main__': parser = argparse.ArgumentParser(description='Process some inputs.') @@ -304,8 +343,14 @@ def tight_selection(self, array, 
signal_mass, case=1): signal = processor.load_signal(signal_path(signal_mass), signal_pre_readout_path(signal_mass), signal_mass, signal_selection) #Get Tight selection - tight_sel = processor.tight_selection(signal, signal_mass) - + psum_sel = processor.psum_sel(signal, case='sr') + zcut_sel = processor.zcut_sel(signal) + vprojsig_sel = processor.vprojsig_sel(signal) + minz0_sel = processor.minz0_sel(signal) + masswindow_sel = processor.mass_sel(signal, signal_mass) + sameside_sel = processor.sameside_z0_cut(signal) + tight_sel = np.logical_and.reduce([psum_sel,zcut_sel, vprojsig_sel, psum_sel, minz0_sel, masswindow_sel, sameside_sel]) + #tight_sel = processor.tight_selection(signal, signal_mass) for l, eps2 in enumerate(eps2_range): signal = processor.get_exp_sig_eps2(signal_mass, signal, eps2) From 1bd62a8f1ad0d581e6a1f6df0142f0da86009afb Mon Sep 17 00:00:00 2001 From: Alic Shen Spellman Date: Sat, 27 Jul 2024 13:54:45 -0700 Subject: [PATCH 17/27] save --- plotUtils/simps/cmax.py | 342 +++++++++++++++++++++++++++ plotUtils/simps/run_opt_interval.py | 142 +++++++++-- plotUtils/simps/run_signal_search.py | 11 +- plotUtils/simps/simp_signal_2016.py | 59 +++-- 4 files changed, 516 insertions(+), 38 deletions(-) create mode 100644 plotUtils/simps/cmax.py diff --git a/plotUtils/simps/cmax.py b/plotUtils/simps/cmax.py new file mode 100644 index 000000000..ece546641 --- /dev/null +++ b/plotUtils/simps/cmax.py @@ -0,0 +1,342 @@ +#!/usr/bin/env python3 +# coding: utf-8 + +# # $\bar{C}_\text{max}(1 - \alpha, \mu)$ + +# Computing $\bar{C}_\text{max}(C, \mu)$ for optimum interval calculation, where $\mu$ is the number of expected events and $1 - \alpha$ is how frequently you reject the null hypothesis when it is true. + +# The single-event energy spectrum, that is, the probability density function which tells us which energy depositions are likely to occur, is independent of the chosen WIMP model -- we always expect a simple exponential recoil spectrum. +# +# The number of dark matter events detected does depend on the WIMP mass and cross-section. We know, however, that it must follow a Poisson distribution, which leaves the Poisson mean (which equals the expected number of events) as the only parameter left to estimate. From an upper limit on this mean, an upper limit curve in the dark matter mass – cross-section plane can be computed. +# +# * A list_of_energies list of reconstructed energy depositions of single events (from here on simply ‘energies’), either measured during some run of an actual detector, or generated using Monte Carlo.) +# * An interval is an interval in energy space. +# * The size of an interval is the fraction of energies expected in that interval. Clearly, this depends on which energy spectrum we assume, but is independent of the Poisson mean we are trying to constrain. By definition this is a number between 0 and 1. +# * The K-largest interval of a run is the largest interval containing K events in that run. Recall our definition of size: a ‘large’ interval is one which is unusually empty in that run. Clearly k-largest intervals will terminate at (or technically, just before) an observed energy, or at one of the boundaries of our energy space. Again, which interval in a run is the k–largest, depends on our energy spectrum, but not on our Poisson mean. +# * The extremeness of a K-largest interval is the probability of finding the K-largest interval in a run to be smaller. 
This clearly does depend on the Poisson mean: if we expect very few events, large gap sizes are more likely. Clearly extremeness is a number between 0 and 1; values close to 1 indicate unusually large intervals, that is, usually large (almost-)empty regions in the measured energies. +# For example, if the extremeness of a k-largest interval in a run is 0.8, that means that 80% of runs have k-largest intervals which are smaller than the k-largest interval in this run. +# * The optimum interval statistic of a run is extremity of the most extreme k-largest interval in a run. +# * The extremeness of the optimum interval statistic is the probability of finding a lower optimum interval statistic, that is, of finding the optimum interval in a run to be less extreme. +# +# The max gap method rejects a theory (places a mean outside the upper limit) based on a run if the 0-largest interval (the largest gap) is too extreme. +# +# The optimum interval method rejects a theory based on a run if the optimum interval statistic is too large. +# +# * The energy cumulant $\epsilon(E)$ is the fraction of energies expected below the energy $E$. Whatever the (1-normalized) energy distribution $dN/dE$, $dN/d\epsilon$ is uniform[0,1], where $0$ and $1$ correspond to the boundaries of our experimental range. +# +# + +import functools +from scipy.optimize import brenth +import matplotlib.pyplot as plt +import numpy as np +import pickle +import os +import sys +import time + + +# Disable +def blockPrint(): + sys.stdout = open(os.devnull, 'w') + +# Restore +def enablePrint(): + sys.stdout = sys.__stdout__ + +def kLargestIntervals(list_of_energies, spectrumCDF = lambda x: x): + """ + Returns a list of the sizes of the K-largest intervals in that run according to the energy spectrum (given as a CDF). + That is, kLargestIntervals(...)[i] is the size of the largest interval containing i events, where ‘largest’ is defined above. + + * Transform energies to energy cumulants + * Add events at 0 and 1 + * Foreach k, compute interval sizes, take max + """ + answer = {} + + list_of_energies.sort() + #print('list of energies', list_of_energies) + + energy_cumulants = spectrumCDF(list_of_energies) + #print('cumulants', energy_cumulants) + + for i,interval_size in enumerate(range(len(energy_cumulants))): + if (1 + interval_size) >= len(energy_cumulants): + continue + + temp_data = energy_cumulants.copy() + gap_sizes = temp_data[(1+interval_size):] - temp_data[0:-1*(1 + interval_size)] + #print(f'gap sizes in interval_size {i+1}:', gap_sizes) + + answer[interval_size] = np.max(gap_sizes) + return answer + +def extremenessOfInterval(x, k, mu): + """ + Returns the extremeness of a k-largest interval of size, if the poisson mean is mu. 
+ + (Number of itvSizes[mu][k] smaller than size) / mcTrials[mu] + + x - also size in above comment + k - gap (rename k) + """ + #print('extremenessOfInterval') + #print(f'mu is {mu}') + # [0] is because where returns list, where [0] is answer + if k not in itvSizes[mu]: + return 0 + ##print('extremeness of x:', x) + #print(f'mu is {mu}') + #print(f'k is {k}') + #print('mcTrials[mu]:', mcTrials[mu]) + #print(itvSizes[mu][k]) + #print(np.where(itvSizes[mu][k] < x)[0]) + #print(f'number of gaps with size less than {x}:', np.where(itvSizes[mu][k] < x)[0].size) + #print(f'fraction of gaps less than {x}:', np.where(itvSizes[mu][k] < x)[0].size / mcTrials[mu]) + return np.where(itvSizes[mu][k] < x)[0].size / mcTrials[mu] + + +def optimumItvStatistic(list_of_energies, mu, spectrumCDF = lambda x: x): + """ + Returns the optimum interval statistic of the run. + + Max of extremenssOfInterval's + """ + #print('running optimumItvStatistic') + #print(f'mu is {mu}') + #print(f'list of energies: {list_of_energies}') + #print('check klargest intervals:', kLargestIntervals(list_of_energies, spectrumCDF).items()) + #print('max is', np.max([extremenessOfInterval(x, k, mu) for k, x in kLargestIntervals(list_of_energies, spectrumCDF).items()])) + return np.max([extremenessOfInterval(x, k, mu) for k, x in kLargestIntervals(list_of_energies, spectrumCDF).items()]) + + +def extremenessOfOptItvStat(stat, mu): + """ + Returns the extremeness of the optimum interval statistic stat, given mu + + (Number of optItvs[mu] smaller than stat) / mcTrials[mu] + """ + #print('extremenessOfOptItvStat') + #print(f'mu is {mu}') + return np.where(optItvs[mu] < stat)[0].size / mcTrials[mu] + +def optItvUpperLimit(list_of_energies, c, spectrumCDF = lambda x: x, + n = 1000): + """ + Returns the c- confidence upper limit on mu using optimum interval + + For which mu is extremenessOfOptItvStat( optimumItvStatistic(run), mu ) = c + + c - e.g., 0.9 + """ + def f(mu, list_of_energies, c, spectrumCDF, n): + generate_table(mu, n) + x = optimumItvStatistic(list_of_energies, mu, spectrumCDF) + prob = extremenessOfOptItvStat(x, mu) + return prob - c + + mu = 0 + + for mu in np.arange(10, 2 * list_of_energies.size): + if f(mu, list_of_energies, c, spectrumCDF, n) > 0: + #print('Found seed mu=%f' % mu) + break + + try: + xsec = brenth(f, mu - 5, mu + 5, + args=(list_of_energies, c, spectrumCDF, n), + xtol=1e-2) + #print('Improved xsec:', xsec) + except: + #print("ERROR: could not minimize", mu) + return mu + return xsec + +def generate_trial_experiment(mu, n): + #print(f'Generate Trial Experiment with mu:{mu} and n{n}') + trials = [] + + for index in range(n): + this_mu = np.random.poisson(mu) + #print(f'this_mu: {this_mu}') + rand_numbers = np.random.random(size=this_mu) + #print(f'rand numbers, {rand_numbers}') + rand_numbers = np.append(rand_numbers, [0.0, 1.0]) + rand_numbers.sort() + #print(f'rand numbers, {rand_numbers}') + trials.append(rand_numbers) + + return trials + + +# ## Monte Carlo for populating itvSizes[$\mu$][$k$] and optItvs[$\mu$] + + +def get_filename(): + #return '/fs/ddn/sdf/group/hps/users/alspellm/mc_storage/opt_int_lookuptable_max50_10ktoys.p' + #return '/fs/ddn/sdf/group/hps/users/alspellm/mc_storage/opt_int_lookuptable_max25_100ktoys.p' + #return '/fs/ddn/sdf/group/hps/users/alspellm/mc_storage/opt_int_lookuptable_max25_100ktoys_0.05steps.p' + return '/fs/ddn/sdf/group/hps/users/alspellm/mc_storage/opt_int_lookuptable_max25_10ktoys_0.05steps_v2.p' + +def load_table_from_disk(): + global itvSizes + global optItvs + 
global mcTrials + + if os.path.exists(get_filename()): + f = open(get_filename(), 'rb') + itvSizes = pickle.load(f) + optItvs = pickle.load(f) + mcTrials = pickle.load(f) + f.close() + + +def write_table_to_disk(): + f = open(get_filename(), 'wb') + pickle.dump(itvSizes, f) + pickle.dump(optItvs, f) + pickle.dump(mcTrials, f) + f.close() + +itvSizes = {} +optItvs = {} +mcTrials = {} +load_table_from_disk() + +def generate_table_new(mu,n): + """ #Generate trial runs""" + if mu in mcTrials and mcTrials[mu] >= n: + return + + mcTrials[mu] = n + itvSizes[mu] = {} + optItvs[mu] = [] + + mu_trials = np.random.poisson(mu, size=n) + trials = [np.sort(np.append(np.random.random(mu),[0.0,1.0])) for mu in mu_trials] + + for i,trial in enumerate(trials): + + intermediate_result = kLargestIntervals(trial) + for k, v in intermediate_result.items(): + if k not in itvSizes[mu]: + itvSizes[mu][k] = [] + + itvSizes[mu][k].append(v) + #print('interm results') + #print(itvSizes) + # Numpy-ize it + for k, array in itvSizes[mu].items(): + itvSizes[mu][k] = np.array(array) + + for i,trial in enumerate(trials): + #print('\n') + #print(f'trial {i}: {trial}') + #print(f'mu is {mu}') + optItvs[mu].append(optimumItvStatistic(trial, mu)) + #print('trial result: ', optItvs[mu]) + #print(f'trial result: {optimumItvStatistic(trial, mu)}') + #print('\n') + + #print('summarize trials') + + # Numpy-ize it + optItvs[mu] = np.array(optItvs[mu]) + #print(optItvs[mu]) + +def generate_table(mu, n): + """ #Generate trial runs""" + if mu in mcTrials and mcTrials[mu] >= n: + return + + #print("Generating mu=", mu) + + mcTrials[mu] = n + trials = generate_trial_experiment(mu, mcTrials[mu]) + #print(trials[0:10]) + itvSizes[mu] = {} + optItvs[mu] = [] + + for i,trial in enumerate(trials): + + intermediate_result = kLargestIntervals(trial) + for k, v in intermediate_result.items(): + if k not in itvSizes[mu]: + itvSizes[mu][k] = [] + + itvSizes[mu][k].append(v) + #print('interm results') + #print(itvSizes) + # Numpy-ize it + for k, array in itvSizes[mu].items(): + itvSizes[mu][k] = np.array(array) + + for i,trial in enumerate(trials): + #print('\n') + #print(f'trial {i}: {trial}') + #print(f'mu is {mu}') + optItvs[mu].append(optimumItvStatistic(trial, mu)) + #print('trial result: ', optItvs[mu]) + #print(f'trial result: {optimumItvStatistic(trial, mu)}') + #print('\n') + + #print('summarize trials') + + # Numpy-ize it + optItvs[mu] = np.array(optItvs[mu]) + #print(optItvs[mu]) + + + +def cache_values(my_max=200, n=100): + for i in range(3, my_max): + generate_table(i, n) + write_table_to_disk() + + +import time +def plot_something(): + x, y = [], [] + + for i,mu in enumerate(np.linspace(0.0, 200.0,1000)): + start_time = time.time() + generate_table(mu, 10000) + x.append(mu) + a = brenth(lambda x: extremenessOfOptItvStat(x, mu) - 0.9, + 0, + 1, + xtol=1e-2) + + y.append(a) + + end_time = time.time() + print('time: ', end_time - start_time) + + plt.plot(x,y) + plt.xscale('log') + plt.xlim(0.0, 200.0) + + +#plot_something() +total_time = 0.0 +for i,mu in enumerate(np.linspace(0.0, 25.0,500)): + print(f'Running mu = {mu} | {100.*i/500}% complete') + start_time = time.time() + generate_table(mu, 10000) + #generate_table_new(mu, 100000) + end_time = time.time() + elapsed_time = end_time - start_time + total_time = total_time + elapsed_time + avg_time = total_time/(i+1) + print(f'Time to generate table entry: {elapsed_time}') + print(f'Average time: {avg_time}') + print(f'Estimated completion: {avg_time*(500-i+1)}') + +write_table_to_disk() 
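+
+# Illustrative sketch: one way the tables generated above could be used to
+# extract a 90% CL upper limit on mu for a toy run. This helper is hypothetical
+# and is not called anywhere in this script; the toy "energies" are uniform
+# cumulants with the boundary events 0 and 1 appended, mirroring
+# generate_trial_experiment() above.
+def _example_upper_limit(n_observed=5, cl=0.9, n_trials=10000):
+    # Build a toy run: n_observed uniform cumulants plus the interval endpoints.
+    toy = np.sort(np.append(np.random.random(n_observed), [0.0, 1.0]))
+    # optItvUpperLimit() scans mu, filling itvSizes/optItvs via generate_table()
+    # as needed, until the extremeness of the optimum interval statistic reaches
+    # the requested confidence level cl, and returns that mu.
+    return optItvUpperLimit(toy, cl, n=n_trials)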
+ + + + + + diff --git a/plotUtils/simps/run_opt_interval.py b/plotUtils/simps/run_opt_interval.py index 380b257a9..53494e610 100644 --- a/plotUtils/simps/run_opt_interval.py +++ b/plotUtils/simps/run_opt_interval.py @@ -50,6 +50,7 @@ def kLargestIntervals(list_of_energies, spectrumCDF = lambda x: x): mpifpi = args.mpifpi nsigma = args.nsigma signal_sf = args.signal_sf +tenpct = args.tenpct print(f'Search Window Size: +-', nsigma) #Initialize Signal Processor @@ -81,8 +82,10 @@ def kLargestIntervals(list_of_energies, spectrumCDF = lambda x: x): mass_safety = 'unc_vtx_mass*1000. >= 0' else: selection = 'vtxana_Tight_2016_simp_reach_SR' - inv_mass_range = (135,200) - mass_safety = 'unc_vtx_mass*1000. > 135' #CANT LOOK BELOW THIS MASS UNTIL UNBLINDING! + #mass_safety = 'unc_vtx_mass*1000. > 135' #CANT LOOK BELOW THIS MASS UNTIL UNBLINDING! + #inv_mass_range = (135,200) + mass_safety = 'unc_vtx_mass*1000. > 0.0' #UNBLINDED! + inv_mass_range = (30, 124) for filename in sorted(os.listdir(indir)): if not filename.endswith('.root'): @@ -97,36 +100,84 @@ def kLargestIntervals(list_of_energies, spectrumCDF = lambda x: x): #Set the differential radiative trident rate lookup table used to scale expected signal print('Load lookup table') cr_data = '/sdf/group/hps/user-data/alspellm/2016/data/hadd_BLPass4c_1959files.root' +full_lumi_path = '/fs/ddn/sdf/group/hps/users/alspellm/data_storage/pass4kf/pass4kf_ana_20240513' preselection = "vtxana_Tight_nocuts" signal_mass_range = [x for x in range(20,130,1)] -signalProcessor.set_diff_prod_lut(cr_data, preselection, signal_mass_range) +signalProcessor.set_diff_prod_lut(cr_data, preselection, signal_mass_range, tenpct, full_lumi_path) #Initialize the range of epsilon2 #masses = [x for x in range(50,56,2)] masses = [x for x in range(inv_mass_range[0], inv_mass_range[-1]+2,2)] +#masses = [x for x in range(68,100, 2)] ap_masses = [round(x*signalProcessor.mass_ratio_ap_to_vd,1) for x in masses] -eps2_range = np.logspace(-4.0,-8.0,num=100) +eps2_range = np.logspace(-4.0,-8.0,num=1000) logeps2_range = np.log10(eps2_range) min_eps = min(np.log10(eps2_range)) max_eps = max(np.log10(eps2_range)) num_bins = len(eps2_range) #make histos to store results +exclusion_conf_h = ( + hist.Hist.new + .Reg(len(masses), np.min(masses),np.max(masses),label='v_{D} Invariant Mass [MeV]') + .Reg(len(eps2_range), min_eps,max_eps,label=r'$log10(\epsilon^2)$') + .Double() +) +exclusion_bestk_h = ( + hist.Hist.new + .Reg(len(masses), np.min(masses),np.max(masses),label='v_{D} Invariant Mass [MeV]') + .Reg(len(eps2_range), min_eps,max_eps,label=r'$log10(\epsilon^2)$') + .Double() +) total_yield_h = ( hist.Hist.new - .Reg(len(masses), np.min(masses),np.max(masses),label='Invariant Mass [MeV]') + .Reg(len(masses), np.min(masses),np.max(masses),label='v_{D} Invariant Mass [MeV]') .Reg(len(eps2_range), min_eps,max_eps,label=r'$log10(\epsilon^2)$') .Double() ) excluded_signal_h = ( hist.Hist.new - .Reg(len(masses), np.min(masses),np.max(masses),label='Invariant Mass [MeV]') + .Reg(len(masses), np.min(masses),np.max(masses),label='v_{D} Invariant Mass [MeV]') .Reg(len(eps2_range), min_eps,max_eps,label=r'$log10(\epsilon^2)$') .Double() ) sensitivity_h = ( hist.Hist.new - .Reg(len(masses), np.min(masses),np.max(masses),label='Invariant Mass [MeV]') + .Reg(len(masses), np.min(masses),np.max(masses),label='v_{D} Invariant Mass [MeV]') + .Reg(len(eps2_range), min_eps,max_eps,label=r'$log10(\epsilon^2)$') + .Double() +) + +ap_masses = [signalProcessor.mass_ratio_ap_to_vd*x for x in 
range(inv_mass_range[0], inv_mass_range[-1]+2,2)] +total_yield_ap_h = ( + hist.Hist.new + .Reg(len(ap_masses), np.min(ap_masses),np.max(ap_masses),label='A\' Invariant Mass [MeV]') + .Reg(len(eps2_range), min_eps,max_eps,label=r'$log10(\epsilon^2)$') + .Double() +) +excluded_signal_ap_h = ( + hist.Hist.new + .Reg(len(ap_masses), np.min(ap_masses),np.max(ap_masses),label='A\' Invariant Mass [MeV]') + .Reg(len(eps2_range), min_eps,max_eps,label=r'$log10(\epsilon^2)$') + .Double() +) +sensitivity_ap_h = ( + hist.Hist.new + .Reg(len(ap_masses), np.min(ap_masses),np.max(ap_masses),label='A\' Invariant Mass [MeV]') + .Reg(len(eps2_range), min_eps,max_eps,label=r'$log10(\epsilon^2)$') + .Double() +) + +#Plot the excluded signal value right before reaching 90% confidence. Debugging purposes +excluded_signal_minus1_h = ( + hist.Hist.new + .Reg(len(masses), np.min(masses),np.max(masses),label='v_{D} Invariant Mass [MeV]') + .Reg(len(eps2_range), min_eps,max_eps,label=r'$log10(\epsilon^2)$') + .Double() +) +exclusion_conf_minus1_h = ( + hist.Hist.new + .Reg(len(masses), np.min(masses),np.max(masses),label='v_{D} Invariant Mass [MeV]') .Reg(len(eps2_range), min_eps,max_eps,label=r'$log10(\epsilon^2)$') .Double() ) @@ -134,11 +185,33 @@ def kLargestIntervals(list_of_energies, spectrumCDF = lambda x: x): ####################################################################################################################################### #Load lookup table -with open('interval_ntrials_10000.p', 'rb') as f: +lookuptable_path = '/fs/ddn/sdf/group/hps/users/alspellm/mc_storage/opt_int_lookuptable_large.p' +lookuptable_path = '/sdf/home/a/alspellm/src/hpstr/plotUtils/simps/interval_ntrials_10000.p' +#lookuptable_path = '/fs/ddn/sdf/group/hps/users/alspellm/mc_storage/opt_int_lookuptable_max50_10ktoys.p' +lookuptable_path = '/fs/ddn/sdf/group/hps/users/alspellm/mc_storage/opt_int_lookuptable_max25_10ktoys_0.05steps_v2.p' +ntrials = 10000 #number of toy events thrown for each mu in lookup table +with open(lookuptable_path, 'rb') as f: # Load the object from the pickle file lookupTable = pickle.load(f) +#open output file +outfile = uproot.recreate(f'{outfilename}.root') + for signal_mass in masses: + #Histograms for each mass + confidence_level_mass_h = ( + hist.Hist.new + .Reg(300, 0, 30.0,label='mu') + .Reg(len(eps2_range), min_eps,max_eps,label=r'$log10(\epsilon^2)$') + .Double() + ) + best_kvalue_mass_h = ( + hist.Hist.new + .Reg(300, 0, 30.0,label='mu') + .Reg(len(eps2_range), min_eps,max_eps,label=r'$log10(\epsilon^2)$') + .Double() + ) + print(f'Signal Mass {signal_mass}') #Set signal window @@ -149,6 +222,7 @@ def kLargestIntervals(list_of_energies, spectrumCDF = lambda x: x): zcut_sel = signalProcessor.zcut_sel(data) vprojsig_sel = signalProcessor.vprojsig_sel(data) minz0_sel = signalProcessor.minz0_sel(data) + sameside_sel = signalProcessor.sameside_z0_cut(data) masswindow_sel = signalProcessor.mass_sel(data, signal_mass) #Set signal/control region if not args.tenpct and args.highPsum: @@ -158,8 +232,9 @@ def kLargestIntervals(list_of_energies, spectrumCDF = lambda x: x): elif args.tenpct and args.highPsum: psum_sel = signalProcessor.psum_sel(data, case='cr') else: - print('Error. 
Cannot access that region yet') - tight_sel = np.logical_and.reduce([zcut_sel, vprojsig_sel, psum_sel, minz0_sel, masswindow_sel]) + psum_sel = signalProcessor.psum_sel(data, case='sr') + print('UNBLINDED!') + tight_sel = np.logical_and.reduce([zcut_sel, vprojsig_sel, sameside_sel, psum_sel, minz0_sel, masswindow_sel]) #tight_sel = np.logical_and.reduce([zcut_sel, vprojsig_sel, psum_sel, masswindow_sel]) data_z = data[tight_sel].unc_vtx_z print(data_z) @@ -181,6 +256,7 @@ def kLargestIntervals(list_of_energies, spectrumCDF = lambda x: x): zcut_sel = signalProcessor.zcut_sel(signal) vprojsig_sel = signalProcessor.vprojsig_sel(signal) minz0_sel = signalProcessor.minz0_sel(signal) + sameside_sel = signalProcessor.sameside_z0_cut(signal) masswindow_sel = signalProcessor.mass_sel(signal, signal_mass) #Set signal/control region if not args.tenpct and args.highPsum: @@ -190,8 +266,9 @@ def kLargestIntervals(list_of_energies, spectrumCDF = lambda x: x): elif args.tenpct and args.highPsum: psum_sel = signalProcessor.psum_sel(signal, case='cr') else: - print('Error. Cannot access that region yet') - tight_sel = np.logical_and.reduce([zcut_sel, vprojsig_sel, psum_sel, minz0_sel, masswindow_sel]) + psum_sel = signalProcessor.psum_sel(signal, case='sr') + print('UNBLINDED!') + tight_sel = np.logical_and.reduce([zcut_sel, vprojsig_sel, sameside_sel, psum_sel, minz0_sel, masswindow_sel]) signal = signal[tight_sel] #Loop over eps2 values and reweight the signal @@ -238,6 +315,8 @@ def kLargestIntervals(list_of_energies, spectrumCDF = lambda x: x): k_90p = -1 conf_90p = -1.0 + previous_mu = 999999.9 + previous_conf = -9.9 for i,mu in enumerate(sorted(lookupTable.keys())): best_k = -1 best_conf = -1.0 @@ -245,28 +324,61 @@ def kLargestIntervals(list_of_energies, spectrumCDF = lambda x: x): if k > len(kints)-1: break x = np.max(kints[k]) - conf = np.where(lookupTable[mu][k] < x)[0].size / (10000) + conf = np.where(lookupTable[mu][k] < x)[0].size / (ntrials) if conf > best_conf: best_k = k best_conf = conf - #mu_v_eps2_hh.fill(eps2, mu, weight=best_conf) + + #debug histos + confidence_level_mass_h.fill(mu, np.log10(eps2), weight=best_conf) + best_kvalue_mass_h.fill(mu, np.log10(eps2), weight=best_k) + + #if the confidence is >= 90%, this is the upper limit if best_conf >= 0.9: mu_90p = mu k_90p = best_k conf_90p = best_conf #print(f'90% confidence upper limit on mu={mu_90p}, when k={k_90p}') #print(f'Confidence level: ', conf_90p) + + #fill debug histo. Check excluded signal value right before upper limit + excluded_signal_minus1_h.fill(signal_mass, np.log10(eps2), weight=previous_mu) + exclusion_conf_minus1_h.fill(signal_mass, np.log10(eps2), weight=previous_conf) break + + #debug. 
Track values just before upper limit is reached + previous_mu = mu + previous_conf = best_conf + #Fill histogram results + exclusion_conf_h.fill(signal_mass, np.log10(eps2), weight=conf_90p) + exclusion_bestk_h.fill(signal_mass, np.log10(eps2), weight=k_90p) total_yield_h.fill(signal_mass, np.log10(eps2), weight=total_yield) excluded_signal_h.fill(signal_mass, np.log10(eps2), weight=mu_90p) sensitivity_h.fill(signal_mass, np.log10(eps2), weight=(total_yield/mu_90p)) -outfile = uproot.recreate(f'{outfilename}.root') + total_yield_ap_h.fill(signalProcessor.mass_ratio_ap_to_vd*signal_mass, np.log10(eps2), weight=total_yield) + excluded_signal_ap_h.fill(signalProcessor.mass_ratio_ap_to_vd*signal_mass, np.log10(eps2), weight=mu_90p) + sensitivity_ap_h.fill(signalProcessor.mass_ratio_ap_to_vd*signal_mass, np.log10(eps2), weight=(total_yield/mu_90p)) + + #save mass histograms + outfile[f'masses/confidence_levels_{signal_mass}_h'] = confidence_level_mass_h + outfile[f'masses/best_kvalues_{signal_mass}_h'] = best_kvalue_mass_h + outfile['total_yield_h'] = total_yield_h outfile['excluded_signal_h'] = excluded_signal_h outfile['sensitivity_h'] = sensitivity_h +outfile['confidence_level_h'] = exclusion_conf_h +outfile['best_exclusion_k_h'] = exclusion_bestk_h + +outfile['total_yield_ap_h'] = total_yield_ap_h +outfile['excluded_signal_ap_h'] = excluded_signal_ap_h +outfile['sensitivity_ap_h'] = sensitivity_ap_h + +#save debug plots +outfile['excluded_signal_minus1_h'] = excluded_signal_minus1_h +outfile['exclusion_conf_minus1_h'] = exclusion_conf_minus1_h ''' #Get sensitivity contour diff --git a/plotUtils/simps/run_signal_search.py b/plotUtils/simps/run_signal_search.py index 2be3c7748..6b743e592 100644 --- a/plotUtils/simps/run_signal_search.py +++ b/plotUtils/simps/run_signal_search.py @@ -51,14 +51,14 @@ inv_mass_range = (30,200) branches = ["unc_vtx_mass","unc_vtx_psum", "unc_vtx_ele_track_z0", "unc_vtx_pos_track_z0", "unc_vtx_z", "unc_vtx_proj_sig"] indir = '/fs/ddn/sdf/group/hps/users/alspellm/data_storage/pass4kf/pass4kf_ana_20240513' + mass_safety = 'unc_vtx_mass*1000. >= 0' #If high psum, can look at all masses if args.highPsum: selection = 'vtxana_Tight_2016_simp_reach_CR' - mass_safety = 'unc_vtx_mass*1000. >= 0' else: selection = 'vtxana_Tight_2016_simp_reach_SR' - inv_mass_range = (135,200) - mass_safety = 'unc_vtx_mass*1000. > 135' #CANT LOOK BELOW THIS MASS UNTIL UNBLINDING! + #inv_mass_range = (135,200) + #mass_safety = 'unc_vtx_mass*1000. > 135' #CANT LOOK BELOW THIS MASS UNTIL UNBLINDING! for filename in sorted(os.listdir(indir)): if not filename.endswith('.root'): @@ -277,13 +277,14 @@ def get_pvalue(test_stat_h, nobs): elif args.tenpct and args.highPsum: psum_sel = signalProcessor.psum_sel(data, case='cr') else: - print('Error. 
Cannot access that region yet') + psum_sel = signalProcessor.psum_sel(data, case='sr') #get the Tight selection, without mass and minz0 cuts #init_sel = signalProcessor.tight_selection(data, 0.0, case=3) zcut_sel = signalProcessor.zcut_sel(data) vprojsig_sel = signalProcessor.vprojsig_sel(data) -initial_sel = np.logical_and.reduce([zcut_sel, vprojsig_sel, psum_sel]) +sameside_sel = signalProcessor.sameside_z0_cut(data) +initial_sel = np.logical_and.reduce([zcut_sel, vprojsig_sel, psum_sel, sameside_sel]) print(initial_sel) print(np.max(data[initial_sel].unc_vtx_min_z0)) diff --git a/plotUtils/simps/simp_signal_2016.py b/plotUtils/simps/simp_signal_2016.py index 6214b0991..f46411a2f 100644 --- a/plotUtils/simps/simp_signal_2016.py +++ b/plotUtils/simps/simp_signal_2016.py @@ -101,19 +101,38 @@ def sample_pre_readout_probability(z): ] return events - def _load_trident_differential_production_lut(self, background_file, selection, signal_mass_range, mass_window_width): + def _load_trident_differential_production_lut(self, background_file, selection, signal_mass_range, mass_window_width, tenpct=True, full_lumi_path=None): dNdm_by_mass_vd = {} - with uproot.open(background_file) as bkgd_f: - bkgd_CR = bkgd_f[f'{selection}/{selection}_tree'].arrays( - cut=f'( (unc_vtx_psum > {self.cr_psum_low}) & (unc_vtx_psum < {self.cr_psum_high}) )', - expressions=['unc_vtx_mass', 'unc_vtx_z'], - ) - for mass_vd in signal_mass_range: - window_half_width = mass_window_width * self.mass_resolution(mass_vd) / 2 - dNdm_by_mass_vd[mass_vd] = ak.sum( - (bkgd_CR.unc_vtx_mass * 1000 > self.mass_ratio_ap_to_vd * (mass_vd - window_half_width)) & - (bkgd_CR.unc_vtx_mass * 1000 < self.mass_ratio_ap_to_vd * (mass_vd + window_half_width)) - ) / (2 * window_half_width * self.mass_ratio_ap_to_vd) + bkgd_CR = ak.Array([]) + + if tenpct: + with uproot.open(background_file) as bkgd_f: + bkgd_CR = bkgd_f[f'{selection}/{selection}_tree'].arrays( + cut=f'( (unc_vtx_psum > {self.cr_psum_low}) & (unc_vtx_psum < {self.cr_psum_high}) )', + expressions=['unc_vtx_mass', 'unc_vtx_z'], + ) + else: + for filename in sorted(os.listdir(full_lumi_path)): + if not filename.endswith('.root'): + continue + run = filename.split('_')[4] + print('Loading Run ', run) + + + background_file = os.path.join(full_lumi_path,filename) + with uproot.open(background_file) as bkgd_f: + bkgd_CR_per_run = bkgd_f[f'{selection}/{selection}_tree'].arrays( + cut=f'( (unc_vtx_psum > {self.cr_psum_low}) & (unc_vtx_psum < {self.cr_psum_high}) )', + expressions=['unc_vtx_mass', 'unc_vtx_z'], + ) + bkgd_CR = ak.concatenate([bkgd_CR, bkgd_CR_per_run]) + + for mass_vd in signal_mass_range: + window_half_width = mass_window_width * self.mass_resolution(mass_vd) / 2. 
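+            # Estimate the differential trident rate dN/dm at the A' mass that
+            # corresponds to this V_D mass: count CR events whose reconstructed
+            # mass (converted to MeV) lies within the half-window scaled by the
+            # A'/V_D mass ratio, then divide by the full window width in A'
+            # mass, giving a rate in events per MeV.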
+ dNdm_by_mass_vd[mass_vd] = ak.sum( + (bkgd_CR.unc_vtx_mass * 1000 > self.mass_ratio_ap_to_vd * (mass_vd - window_half_width)) & + (bkgd_CR.unc_vtx_mass * 1000 < self.mass_ratio_ap_to_vd * (mass_vd + window_half_width)) + ) / (2 * window_half_width * self.mass_ratio_ap_to_vd) return dNdm_by_mass_vd def trident_differential_production(self, mass_vd): @@ -123,9 +142,9 @@ def trident_differential_production(self, mass_vd): #Use the reconstructed data in the high psum region to scale the differential radiative trident production rate #This scales the A' production rate, therefore the expected signal - def set_diff_prod_lut(self,infile, preselection, signal_mass_range): + def set_diff_prod_lut(self,infile, preselection, signal_mass_range, tenpct=True, full_lumi_path=None): #Initialize the lookup table to calculate the expected signal scale factor - self.trident_differential_production = self._load_trident_differential_production_lut(infile, preselection, signal_mass_range, self.nsigma) + self.trident_differential_production = self._load_trident_differential_production_lut(infile, preselection, signal_mass_range, 2.0*self.nsigma, tenpct=tenpct, full_lumi_path=full_lumi_path) def total_signal_production_per_epsilon2(self, signal_mass): mass_ap = self.mass_ratio_ap_to_vd*signal_mass @@ -282,13 +301,16 @@ def cnvHistoToROOT(histo, tempname='temporary_uproot'): parser.add_argument('--outfilename', type=str, default='expected_signal_output.root') parser.add_argument('--mpifpi', type=float, default=4*np.pi) parser.add_argument('--signal_sf', type=float, default=1.0) - parser.add_argument('--nsigma', type=float, default=2.0) + parser.add_argument('--nsigma', type=float, default=1.5) + parser.add_argument('--tenpct', type=int, default=0) args = parser.parse_args() mpifpi = args.mpifpi nsigma = args.nsigma signal_sf = args.signal_sf outfilename = args.outfilename + tenpct = args.tenpct + #Create MC signal analysis tuple processor @@ -298,15 +320,16 @@ def cnvHistoToROOT(histo, tempname='temporary_uproot'): #Set the differential radiative trident rate lookup table used to scale expected signal print('Load lookup table') cr_data = '/sdf/group/hps/user-data/alspellm/2016/data/hadd_BLPass4c_1959files.root' + full_lumi_path = '/fs/ddn/sdf/group/hps/users/alspellm/data_storage/pass4kf/pass4kf_ana_20240513' preselection = "vtxana_Tight_nocuts" signal_mass_range = [x for x in range(30,130,1)] - processor.set_diff_prod_lut(cr_data, preselection, signal_mass_range) - + processor.set_diff_prod_lut(cr_data, preselection, signal_mass_range, tenpct, full_lumi_path) #Initialize the range of epsilon2 masses = [x for x in range(30,124,2)] + masses = [x for x in range(50,70,2)] ap_masses = [round(x*processor.mass_ratio_ap_to_vd,1) for x in masses] - eps2_range = np.logspace(-4.0,-8.0,num=100) + eps2_range = np.logspace(-4.0,-8.0,num=40) logeps2_range = np.log10(eps2_range) min_eps = min(np.log10(eps2_range)) max_eps = max(np.log10(eps2_range)) From 02c06a646edfd38d8929948a6cd4ed99c64448fe Mon Sep 17 00:00:00 2001 From: Alic Shen Spellman Date: Wed, 31 Jul 2024 17:01:14 -0700 Subject: [PATCH 18/27] save --- analysis/data/v0_projection_2016_mc_config.json | 16 ++++++++++------ plotUtils/simps/expected_signal_output.root | Bin 86910 -> 0 bytes plotUtils/simps/simp_signal_2016.py | 6 +++++- 3 files changed, 15 insertions(+), 7 deletions(-) delete mode 100644 plotUtils/simps/expected_signal_output.root diff --git a/analysis/data/v0_projection_2016_mc_config.json b/analysis/data/v0_projection_2016_mc_config.json index 
fc18e19aa..9dd1d5b87 100644
--- a/analysis/data/v0_projection_2016_mc_config.json
+++ b/analysis/data/v0_projection_2016_mc_config.json
@@ -1,10 +1,14 @@
 {
     "7984": {
         "target_position": -4.3,
-        "rotated_mean_x": -0.23135574671453285,
-        "rotated_mean_y": -0.02398086113913096,
-        "rotated_sigma_x": 0.2109738212614317,
-        "rotated_sigma_y": 0.08129743797473131,
-        "rotation_angle_mrad": -103.56647336269294
+        "rotated_mean_x": -0.22984095484241324,
+        "rotated_mean_y": -0.027767688133970195,
+        "rotated_sigma_x": 0.22379266968589445,
+        "rotated_sigma_y": 0.08329193763283319,
+        "rotation_angle_mrad": -89.95092043750218,
+        "unrotated_mean_x": -0.22718604898785316,
+        "unrotated_mean_y": -0.048091565786742764,
+        "unrotated_sigma_x": 0.26342442474167516,
+        "unrotated_sigma_y": 0.10030767283985152
     }
-}
+}
\ No newline at end of file
diff --git a/plotUtils/simps/expected_signal_output.root b/plotUtils/simps/expected_signal_output.root
deleted file mode 100644
index ee9130e6969caaef49b2b17b9f394c25ffc764fa..0000000000000000000000000000000000000000
GIT binary patch
[binary payload of the deleted ROOT file omitted]
zBDJA`2NHT9Xf*IR(Nl}C2*0ce?35uQ>i0re(E z*600>po;rv;^ONRP|QtmaHpXFd1|^%k|Z9Gd1=b|$!rxQPB~n-{o)shU~hJBc45Al z27f<|OD4eklR$j|Zw)wCq<(@p<~NSLtZgbV1B_AIT{KGRhOA59b4Xq9#h+^MruorQUw!Wh&ZFlVyyMI)OGJ%AQXp39a1ClRoLz zhvxA?tzg_k!@WzeO_Yiv8m- zaM*!5X}&OUMth>4B`=wp|L~%EqZ8p5Z^jEaiT!2tJ*f_(;CxyAnbF#)3_@K_-h_utu_+iC6m)^ zidqCtt_&fmCwRberGNL@1Sv37eJBS>a-enP4xsij1WGTzvqlk3K+?p*8kuAbgwf8a zZ(KV7_h;-W9W5brXkLd~Kj?^db%;5=j1bx)FG(R~_dqL2j8PLxr_kJs)PH1sSJ5N~ z-mtJ}9U47rJ|~;qjD|C>TYP@?6!pcg5DHseK)o_<$wIuhQ3u^Mp<>BB)JAeRm>5rh zetH&@$}ZTU#xnO@mMH;L&7cq~WAI;Jw5am3wL(Y7e{rZN5NDvu1)2ipZu=ly;JmHg z^HCfyUlNT}HX$oW)+(2VrQQX(z6%z&KRgE|vdDXMiU*(~o)Tb1FAZuB7BlC{T|q59$rk!#z?ppq7bwD2{%BO27r*RNORBT>i*#DQ5@d|Mqrj3@Cx@ zi^7wkZtp;fzP79^3iDA?Q`J7*Zv&zG!--m-r+{xyx>lb3DR6&zLnJhG7uZdXJf=ga zfm!DlJJ~7+&^wUxJ|ijv8ZW`~!C0O_9$MU&#D?YnLWA`cY@~qiC7%CcY7=nAa;OC# zwxhk4S?60Pu>3z&;RP?f8d?vwH=)a!L#xWAqd1o?qd9r^LW9|PG+~E>63cYkyY(+c}^7DOLOAfcDyudxs;^Ks+NeFUgTDCo^3*n z%C;?U@&r)LUc1}Jp(6BIKm4n^jlwbSd9A^GEnK?C_=rcNVV~pzN#F+rdGO`NUSX?6Lb^!Pxle+q5>2!#PY>K4J$Mr8AU?P9H%c zz^mFM5j*!qfF*=`83gK~;Q%}1US&5?lBKCAT z4Nug~=aWov%@TF8RQOu*#GsZe;_HF;vA=(QJjhMDiR#fg^MO-LsP5&~*=xn6sOHi` zQUrk~aeZ-gB5i-AX%=^3FD;X5+w~z55O@8s0v`;|gIgDQ*eF$vy#GdN0 zfxvG9t(%oiAQG9GCaq2fQqnt(X&KcZs~V-F8Tkz4>=^HnMM{8tW9BM)MhglL8Ms#s zEkWVMVAJ;s%x7ew(lG=T$?MLV<4K>~$j^Om~n93*C$XAitZW zGu@jAL=&w$+NrxgY>V5CyLIO8X$3HD)%f%w} zYlHl)tIJQ+GPp7DX`&3D=D3^20{?uEdn8UF@JcC)b$()1v?l+yxmKPJEz@PUcI8B&dB%H#ViEOd z@<$%ig2n+FV;wKM-+c!S_x98kbr>!lv?JycRWES+*Q5Pk3< zTXp-ciL%1;Lk|sQQCc54*K`cyG4DyFPL>`M|M~8BGrV=HPhtA*Z|@D_m=s_%56$h) zyA53WbzgIc(ts~Cp6w*&zZN1Og8!#$&Ecjo==Ip9n{a6+~flBVS#9K+MzG}!)sd{-62`E~8E8ud06ijT&eL4AG$l!?!8pkE$jT0{;;RM)&< zgJ&;|s%miFoy=!Jm3D1Q_ZJdS@oB~O#PV`fwu;Nzp70UnStqLVaZRIwCOn_Y2h7-> zcH5E20PNkI=S!=37hKTunhQU~nF)^L5ci-*ErTNF7;oF$#kv2f7Kn+`h!Q3pKi_uu5IzImCsSY$240nx$FS^?e+oBm9cZ2zlxM~MjQljTErx%_d#IC zV(Ku55d`e@GoK6P0iV9&j9CMwGk34hi4scyC!yMDKhh3hdG)h+mZc3C>#EC`xN(8b zF3eh}IsvF`+%u?h&4H9J`a!G{=9fD&JT-{rp}6NA`dmm3&|ZIb{NE7+w3SW9sCiif zt@BO_gs|eG<#W~kj}RxCvt@PHyz~!Emf6G;+)zWKj$HOOnz`ujTpnL5tseT5-ipuE z5s!MfNj!)6$+7#Da4q7E~G>CN}H*gm(Xi7ewRsOGYzBW1J}DtA4< z7ZW^yN;;c2D|R+ek+VPB>t#Ze{XJIocV8?j_C53*D5*v1M>Nl4f}&6+X{?O1n-EI5 z{rXpV3^_^|!10{G^5RF`g<2QBEgtj!&}{r1JI((f4%+znUH+ZKAP`WGPK+Pn1NCoW z=dOx0U<~KYJaYI8tkT&nWiPS$n!ZKl{g7m=f5IG3jh#!}3_6fLIte@^9mb{{;lTZ< z`RDK|7B@@7=^k7^=^IX81J&v^Ei`Umw^2ReC7f00j+@ zKK;{`iS>O535m1FkIJ#RSw;;7ntF7=-B4fnM;&ds+E&wJ?^~|Ee}>r7+R;)DzHWV{ zDVn2TD4?LnLlZv;lrq+3(5PnMjUaLz^!G9yCAaTo)Ti)6qv0$b>hkqFn@Adg?L&I? 
zT}6l&{VZQ0>U4a9zL#q#1{Or2hK2do(tKCcl2T+~akUOr#(rp^uc$?za9?s&Mczgq ziht)e@o}Qu+a*rxvr{OGT*;U7eKks}%Jk`%CP#%L(}$uQ!zk%{(fXsAQ1s@GZ7Nmj zHhS8g+Sn$YhN2G*e6{{$q5G>X!ng7`|3^H!&xvP#ZIb91uTfXGN?X4GTv8z}eqL1| zyHf6Shg%HWuTb3QM7Ra?{~ThXeMo`HI5MnF51Y#@-C?p z%kzLMaar(IqdwZ_IW2JkHyUkzecZ&%l!Mk(xc=Z>cg5!Pmb*?uKboffr=EfBzaRIM zl2-Y#hen=;e5;6AMuQwDJ=HV=QEy1fuVk9bsPm%)t{btxIrjd; z*WfSIqS*VVe7h$rZ=n#E&-G}B4}00$T`uCW?J@6uA1K{%>;HR)U|EaIOi>fiKA$-+ z`}QmlYLH&h{5AsQkcnUVU>&He7XGijGyTVMi~2YbLU9{1lgt&Vlrr4EJ3}F42#E|C zlL$#D88Sr1kR+5@gDFntAr#_N$Q(+hln^=+&-J`G&cE=ycwSwf^>(fO+1LK9z4q_> z*;=bhE6=`PT-R1bTtKG(C6jmD5XBv68 zQMTms+@*U^Nuj4#*y%OrO`&02J@P=T4QrFJCrGBp@Y}$EmPUvZOGox_8869TewWvM zPO=5zhZq`d$dQFfUw3*6GvObygIkBA)f_{^D(f(nvTi*vdlys-k-5{ZKHzKWc z3@|hd-Jvwnsf2_gs352)9TE~!5(?7Yjda~TV;nip@p;cV@4fFIclnuc&))0%tM#k3 znYq?S;x3lJ<9~+8Qcv{UE}#028E#D95<(E4snyFH{!cL7QC5y`PTZh7*36D7sz#&o zPkR$+Uln2z9`$;wye3|;pPf179Nqb$LCVA>G1-Is?bXwfq=ai!-ubB=WKtxDg^p_` z@iYtFFLvyx>YgbfCO4P`38?wUg!>?pii*2+8}*Mr4Hl7!w`dCPjPde0GGIQxF2FJN zoWT6+y~pZuWNVcDg3`HNnu7M~ayN2C_O$|(rf@?8=G2?o!Dfzmsc(2&nQtBkFy2$(0Py-H$75)&v95m5U5fM{QAT?pjx#@(4d+WG0S%&1oxK z&9(Vxb~|TqAfE2S*;zo&Awh$0l|?JF>N>i$=*yfB(< z9@g%1nrq-Ky7iP;#2D}Wb1%IsOUZnBwPE4`D8t-Z-B#k%E*&`W%U*MoAF&w zOmxOvjSR!GJc@fh`=6L7KO;TeebwtnI~GPNbjpnTpkT7{c1flE01nYkgtD3fD*b-S zXvUd`gT=96c2=F#X2y|;Duu>K495;SN#VzRMg1I!H8%V3+ALv z7q~64Y2{RIczCLHae=eSDjYXG&`9RaZE;u}nx=Q6K20;}>rOuxmtgV7yb;7}&)8q4 z`&4uEWI&Su6Cu&_O%XA=32$Qtv;*@5Z>Xntz$}QBZ`{Rf;K~fg?pM)MQL>w2PATry zSC15Y_R)wA9t#K_d5pdnb?ckc`>?P)xsg|s_>x7dhHcTdMo}hC{yx9lGubQJBM)n< zcMQlr$jV=8?LwzYB6#7F)U7~tvsS|2^eK+o)4VeIc6`HkLne)-#CB~2ftOjiiz8>> zbS-h$x;aB#8<-zHRh^{}cyv9`orpEOP!}4)N;7TwMTGeVakGZWQoWXKbfT$AOIO`6 z+DYq1Us}Tin$HoeWy&b_d<-}Ym>oY(qbI8PxUlR>7Ob?6ODMv*zszFihoLTfpdd@~_*SM6@+sb9L#mpxTbg7_1#fTA`r)CT zS-dyg+iH`@9Y^Qw4Ub5 zwXSf+3ZWqlBi4D3RS$s=1LDPA13P$rJZ;{s zm@;xK@lv}OH(vHpqkH2#0s)q$#MsAKMnNCv$I&_|__v=os5jQcMwIPK;Or+%xiUUs zf(YT%iNCd^9=}feq{-Q+!VO~z;Z)}fSzVLdZSHsMN7%)z10HK#m2pZhaLnbBr zAGXsZ>a zqB2Edte_3@%LR11-0@;XMHzd1q`WOe!B0sC@D{Co&w?Cfu1Yc{3kjG`)}LucedvpP zx_G-rq*P2k{%(-tnLl*zit>z1`SS$oC5$SR(Srifrysxcr7#(2-BY#AZ@w&T;-M0# z_6CR9i+n9Qqtt({cX~|THzNCEF2ZdU%muXC3C~v#d#$Nb_(a-OG#Z-g8W*hSies$= z@_O8gZqDf8F2#N2B(=DYnikO4E@R089qqvjI)9&Y#QTBma=Tj$xH071F}XSJh0ky~ zzTatji!4$@H6)eqbb28!9+P=)CQTHt0xL1dK)m<^DRkJJHiA42HI%^d^1ChB{+Dl7 z1vBf6D{2nj;&>kO#u(PG%bIfPEm0uMmJaE^%Hhz(EMNb;A@-gL8S2U)^0}UfDJfcI zSxtO;G|HUat!5(k{bzi^gF9DRXZIWf_hb>DM>aIe;N5Q9S=TDUW|eDn9!wkByw)~a zvc)+)gsc{RGJYTU(o%z*&vy9Nkm|h#b>7I>4&OAi_T?QNnX43p9)M@DO0tV zhiY1nvk}{MfMZAmg7dA%Us$i*8}m0e#V(OY^|U`!Ian9aSjZa&{%nj!|C;Z~_2s>T zK`jxKoNgYOsJbb1HfFX5TML@kV#7~68XlL-#paCi5J==UG96j0fGvH#&^dh6}rp19Bk zFDA_56|H=+l;9i@aX~zTUd*r+YR&M4wRX(csFZ^QXVuxMRXl{xtscJ82-l>&T)g|* zN4rKePz|h!^+_c|>A9AGx)}58%C;l!WM-#N*&Phsgp9n@%xHnyt(-x~4DQ z5^c$WLlduV>}u zYi%Se@_9QCj9+%M1>Gtn&tKxEt&ll?RcB6JEKDpOP^=tfm)6OFUh~As0^PYJ;bjnU zy{%qw2eB^kW`x#G+Pk~i^iS>jsACH{S4ZA9JKs}#CwN>yXMZaAc8@{`;X|g;(9DYt z_4Cqjrv4xtp18Br{K53Q$BOq-vjWkHhF@eud@Y&`S;9+9W+`}uM|SJ2WvAmmFuBkj zGV&YkYjh8j$t-s+ePBF;;?0fSytT(m^k}|PChn{o**AL)n*=3j-$z@A^Z6m~VnD0| z`gt)5YOL@`@~Y-GZcJ4eO9B0CsHgP)s(fMFX}>FkkHbn}yPNcUDPivO`B#~&DUXtn z%Rw1O9jo41<5w{FZ+l3*{wyUmKW7w1i5xg-zCA36OaA(WOW^i3Y}zP#doboHFO?|e zE4CxDIJd{o04QQ7+8;u=QCz9ZF{XLEg+Y-KcNer-G4nD}G6i;I z%_Umjrg{!MiBQ)QZ_mb8atgU`qS)&r{Ro=2`)d92VpJXUdJK-N&-*6tuJFrv?dbd! 
z4ve_(@t3=(eFok^=*G;J-4PfRa!V94QXAF{cEtMSD7CxP0$MGEXgc$oJgi@ai{5YI zOdJ5?b{tMrtzOZdU7z*M4n`Jhmw9C{WOSvXZ5Im{g~9f=!1B8{biy?rJM;3H(q9vo zpFp;pMn1G>`XhTzUaox+eWl@3EO90!WA~U6E4sMPh#CIGo_^o)I`QZ;>v@dJZLKf% z2mSk|ap>4SRWj(TV+0jadZ(of<4_>UZ9x{at+Fs0_36xrVS zlyt*)(M+O|7WiWg)ox2&lMD^dabxsqtKMQU9HXnE;+>P0u0*LCxn|)HbrJTOLq*;^ z9x~x;^eqcR_NL8>PU9m~C*9QIF(C=flJjU*GofAmU^-OfdWOpHDsrwYwuYi9-KS`j zN^+n%^wsUL0^TT#ewm`v(2HQz>KY%3OE`=P;$_#|pGKbaTk^lTcih|A%~-DTj*U{E zNp_!N{@c~Wr`g`K6PT4^wjA7&4+XEXRSijIl@0p|`EFj{3Ot&v;q6yj ze__OMgS(<3g4yr|ZC19S@f5^$%?7JdV}S1?-!!g}Ok{3>TTfx&j;ir1YSlL-tOo*T z=NsoSSTxU7dh}LKIcru^WX9oCM{> zg<7mWNr7X9_F8jgsOM>M_0ug%v(&1I37#2+?dJPsVo3!zX#=B-dKV;EWY%Xf#~6}r z?%K6zd;6W(&g!o<1;3$X{pey|W&45uMjf}mVdmj6hfw*LWI^o`l8|Dw$fp&J?)qEI zaU~yEhCYd(Q)?Hxz5{_cRY1tVGnV@VHV7mMLI8nMIY1zg1&X$cjjJ1$mYWL%>;Q35 zaH#MiR%fzUX* zJD5RSU=%>?Ar25nH&+@P$BUG}WDA(Z?+22NqZ^FZ-3w&EtORy+0yq0q%#coj^#yXO?h=1%848lmBX6RDXce zfX!UpT)^gTG-iOc(3snUU0s2*|6_c&$DhZ4C?5$%_38yuesKO7&N<;vA^^g_tt61m z`=)AOCKTY(|L++7la$E;nY%+A%^_ej`(JI1MoL=dCs)6``Jt2_hkh^&SJU`kGTa7d znDkeM(X|!9uGTR30qP7;YJg292?AxqFaK4`F~8HY+XbW_S~iAjSsSip#-Dh=IKOdgAjoebm1hh{7M3*r3%;#Vh=~h z{Yis8?ASj8_xe5mMsRQ)I5_jK;27V)<)MylxUeT0OQ;LYzrgnWJ+>(vTOW?i_A55l zH*7UaOIL^+-nSQ+_V{f^oQ(W7Ej{_`{*B$@Nd+Q@r?wS=m~zv|6juU|6T%Sa2*)Jb-?jw9RO^?7aZZw zWp{x9*qZ-)>)!A81pJy$vHwcIcm83kj_~Kw`#1Q2OGYKUK)`q3f9dqE;<*&esp%^@ zIsx2aNP;d6!7u$Kv|VcXv*QPnYq;Z&xq$K`w0VMH35)|=#Nxklcu5bzP7KDxYkNovE2oX3z9WD*J3(~MuHM4Pa#rY%n z!Tt;S`|-`^_n;sq0|lNNf-Fq=8si!4r2&8y8m~ceHT?pwYfRfS11> zmn>ja6GRdO`kPDqPbGt{1K5ki-N^~a*ni*y`Re}~9IQamcct-!x>$ffAS~eVUwtZ$yQ8a(l_TIFek4^Hzo+94oDLs2 z9XEbmY_Ry|0FnJBY#yj1H`LPdhwOn86mT~{73%@EcL$6DFcCKh7;s3Iu;LQpVq*?l zl?1>8$d!LTkjNdb@l!CZgV*B1lgx7DH!1*%?%3v9*w znv2r^N*e07l*|w)pJZeo)Jfr2jxoa51_{qUu)e*A^)jF?z?`sU z0(<;+{46{EH_%#c-u93m(5|rm0C;~N`fqwChM%$qX2<-_H7x7H{LZv1@E-W+Z)UxK zyb+B4Qnd=dDR<>?J{4lt3m5J;*ncQjfSG>}t7Bu~X8jG;6ZRJLw^u5lPLRa~ zDgWsCslwGs{BwN*zVJt*76d|Yy9~nsE@u9-HbKOH)+TT!FaW!`$lwpBYxGw@-y#F( zd!rfwxcK^?fe;w}4D@4RhF@Ulg4x)E5G8*!Rk?ri29SqtdCSoTXe%nol0o4~B#k@# z|1>rhKra~ryECmRv0Meb`lL7%D0w=~ zC0x)yp#diMwhKi3ko#-6+=+e)0Z{Pw!VL7iw1SEKAFS5@aQXjNTNW3qy+1OoUt2sE z0?oHA3;gejzM$cMl1({Xkn10~zxpPcpYjFNcIjJd1M55RkABME#srj!!*K!hyE1V| z;L61R2?i(=Nn4QzL?IyA3;?4^g0Oes7Xl0G$L16E)Snm772s3=1N8rkdLHKdap4i* zzt{8MLlca3`1n!JBTD{7-xhAt<{%{Cvu{=Pg(?T$M}UQOqCaB$e+U1bdBA~TPrriy zM~sJqo58`q^@}eQ)xTt__}`PK|Bn4Vo&8JfxC>Tx!G*ek5G6q%N4Vbt90c$W7_f+q zYN=`tvA1_+|6?l-R%WyC$g*YMqw?Q^ z)q}(8{a?fC!(sLRSFo|a*SjkS5fI0>cAnU;ddIMo2D^M~6*I!OYuJFI8fe|K!rOXg zU>6#5Yp@Hj9}C+;fLK9cN=N+u0ABuj{H$TV1t7q=!guU1SfVosQ4$2QgIl7=uN0xf zD1s+N4BrTXXGQ<-&~tHr`~p3{md_VWai|AS`RziIUW zLC4kI!IMZE>ISx_(Q^ae@VbUCE;)0 zDslB!WdL0iz(+W^J6!qx`mNOkU#Cv?P&YT&y&nHa5dV@28^>={e0%+0Q}J)q0c<=2 zjWv1TehH`x5$rA4-o^(EyJLsO5`OTX2_oWxMSKer-}}%G@U>wDUmMphszV0YP$VAW-H7e_?-4TB8mMa&h`1dPd%7NV*29t^9kB@0sEP+am1`Kqx-CZFr zux%;{-~n?aza8J3kdl9~N%e<2)dL{{CuqTGyO7ghT1vZHT0&f4Ei$a{?`gjG$S#5Y zhr$qcdlN8i+6B45BiJuu@dY6Q=X|@*>OzH#`W^0y!7nrz(ZJ%EH;oybDi8<+>q!4s z{!zc}9I?U9`;{A5xdH&E|0i)jcF+Prh`p0mdW41LfSG|=p|E)Y@RHwCmkdG#PKbq5 zr~a!s;aI{*`NW09Nv*Sprf7qT!s`1nyM0hR6FZNy&iGFX;cfIUFI zxq2-4`+w#4{}-8v_g`irdH{R>H<^g{UuPnEZ2xy8czORO8TuzA00xMl^H)ZWAO$ya zf}g7qC{>ZQ6?tVrfmK19fSiFXdj!fqTR8&epRF8Lto%DG=e@%FkF5Or>Vx6I#}6w9 zmg7IYrvX(PzHDXjen#|fju+-j{$n@`ZSenk@CPoewvT%YbCg-Rz`fK6AXxqaYVraO zTfikr1o3hL>TrmK32bu~Y;OW~GO-5ftDt$tfpW7zUI?o-R@y{H)tt)mJfI-Ddl8p; za#&a%bzVY7e}vqg=&DMfjVu+!qQr})cqt)M$_3}qEQ{hLDQ(_+lS}&}Gms6wbwSU} z?EO|FG5K3v%b#wk^;o^|>04%smz7IY$5U*3qr?(9Le4OP#=xMUq!`IrX8LI%QAXP9mprF0 z>kwR-PMrxfObY2U?jQe{t~5!M@gVI*eoAis7o_V-H|^}M6Dt{c-!j{n0lsygAur?n 
z5DV4B5Vs3^__&~iTj%i1bu2J8a>FS;KHiBymU;Kg23dxc?nFWW5j}Dza8_`)zI|(x zT8JzztNz64qwi)t+tl52FhvPoD5@W8<~#J^XA%fXw?+V(ty6 zKoU}H;1J(4Lh)$KRvapJ52t40+cVKQ_ZdJn|>%4Ywg1&tdTPj110nztcMfXrAxczvc{*>s$ysuSzJ4H_7Q{BWQbEK4d z$d?7-a3>QVsda7hOWMPXA!4aDxtZhkTn%_22>51NIz*G@*{zX0#j~(Sh>yEl5Lcgi@$atPrjbe3Vj_mFP$&p{3+o?G_UnmbA5BDq;1Qo_e`u+ z#j(7r)})RYgHhq~qf9NDhSQHNFFPsBgq-m4plR&2`%{;u}=>f@uj+Q zI(Xb#Sd&{xZ!v3$Ny`6W8tU60A{m_eYI9whWMD#FJm#sv-E`*raXpj z7++ZDXfL89*Ul*;ep<}2@cXjavu?w(PH8PM*N=LrVjL_g;?w3`^Ee@!aao~qrYK`e zwM}y@?a<7Pr0v z-=faG2LH-qe$ut6KyOrqMU?FIyN?2S&fTK=l=@tEXju7|X332kv-+K}k$6X(Exu~| z>6}YtT3O-N^%qC8`|#ta`tQ+bhwf1iSz+MsFO_g2(Z;)(5`JRGxvP@e)djiF%0j{T z@u)T?X#088rnN23fZM>OR63g+Z|`Eg$NEJ2nt42(*Tp`H@3!?@8j%T)Y}cj?b>+}| z(4`2EQVJJWy+`Sl{wPFT=`YTLHM}2tpRKq^CS&;a*VZIeYnzvOYA@4e_^PVs2k+7G zClbi21dN3rP3u7F?+>VBmOV=cFE*aKo8wCrf}f5S``e~nKbLS?0iNtP}-2{!U4 zE4l7PDUujFZ+Lj$)2SE|#k=zMH0p36Y?9{q4(Q(FDV3h>l~?;60qg5_R!-B03)df8 zRlj?$YR>9*MqZp9<7Q}lbT?x`D0;{wLTdV@=f3t!g1nAEvCE%61=W-;6cC*>KiaF#-v9nXU9 zYTPZ}vw25T_)zVtDKi#wfOaU_XmA}~oV4v{(XULd^ET6^Ii3-bRl1LXh~eN zVz;U;ogs^4>~~t8Z^zyc9pkw+$a?<%WM^ffgQ(--D{Lz&{rG!RcWOT3xHPQ@o<1zU zwqH$GAyF{##dDtrja`Q=&$IsS;u@xph`b*y?Kwqq96Q42J_yT8I%E=KRCK%1@NElAICq zhZQoi#txABrCrbIU42b`dcD;+2AQG-`d3XHa2kg^%O#d>@{!49mL&{Y2IU@CKSJf* zkMv32@uRInIHl*^zrnXJv!T0DiH?J@cygtpSNk*W+J|K6?Nj5AY~(wdejVst()g4w zGL@|lwyj2Qh-1H9;!4z@hK_o^Y?rWRN1S`+KS#ntAJ3gmZbLJ1XBE|PswPe*e(vmf z$$l?7Z`ON3_GcKZ#pT=zi_e_HE8p_(4SSAKm3;O$Zw?y4@?7pWVc(`sagYVF!vp%zCLgKe|a#;FrDo7B_*r)^%m z&baW&bQR-?LsRxqez46|O_vY`@a}QX>!$dd*E86knucof47!ur8d;ClK0ltb;&ba3 z8TZrx2i1-E-3he38f_SxS$oYGm2bPLwnR-l=unLy-zEuBcUHGmOmiXh$yjZ&_Bw}d zevgQ#egJkyuYWm}pnvESle6WLB!-uZX{VwyA5fws&y9R71Db-O)m>ChF*>*9hNV+( zfR($w#l7xQwNOG;9`&PuiP)36ln9Vy#62^_K3C_MP*zv%0N+gEO{sn)H-a%%t<`TK zlAbH}*qCeH2Khw}_1#^j<&PFO47`0GAzZQQ=kLy7mX^D{NvJn2YjxUUMv>pitiyO#*OD#sW>wC=1yW1JMu zniO&mR3ctpmN0eRI=b(ApXFNba|^Z%v|P3+K`X{} z1MWI)y<60Z@jT5A58ER>Z`tgwTeI=5R2@G{-cSvWDXsYVAjY*ZG|0zT2$&+>8xtv# zelC>%frM&>YY(bwNHY( zUR-^y{l4U-^TXMR&bPr`y?q-}w@fxC=i66`xDmm%S0}6aI!QhUo!wj;WCQQ28{Yd` zBE<26nE6A5i+*xNF<<@HyD0As2Dv@TP4gMF=ANEYGTluvChx5_^;@i!Rm?!cGB5Mq z>7xuilP)%oI->hZMEVJtTtb>TufXQ(9PlG4uU>OxCNhin=r2Aj$!JhaJ9=ZZtR`k_rx2|x;1Hb7$ z-z2N^{7ML!5Tb*}X{g0PR#pkeVSKgQBhiP40_zf$JJ<5Pk@dGo7j}ABWF%5=zmrcx z$`@FULur5dE^$8p&isnDt_iQ-sjnpUb;bY*+=b$%bu^n|oZg<73x+$GM$0WSk!vYO z4v}7D;`8KV1~K6?`zp{XrkUI&w`sMi+Lwd&4|u%d(W~pxF_Cq|haTcz&G%tI7%6mM zRkhvNG;NCZp-m;gjWwLFuI50u%Y9xKTm=P(W@;~$1;3mn%CEh(iAa(tKA2V9+F0~< zy>nxM^Qz62uL5MpSdSZm^gnD0l4&m!8(xt>@ZZf+z*l>gVFTF+OT}`rI-|#VSd0QMPxK+-^O0`HXMR?TdZX?BMa39|?0a_p)*+C^ z<{fMDo1#wOnKy=&$koAhbO^EdUd_elzz?RN$B7J3f-Bq43++kEkO*IOj<3Hli! 
zmfE4pr!{QKO>-Z8yLFOmS~~xvf1bM5&qTn$?5beoKBy-b73)ycR-dA~_}uoB`{3SKP)ET^MV9)xM&- z8JNY&OB!(1CmKgzx?(=~$py z9`R75)T?zHPqiimR~Z{0i*8n&tE?J2Gf>$OAR^9iH`R>{p_N6oE$0#6x+YOvFD&x%PP&(s4i zo8f2DDn<(!g@zrRIc^11*Y<7Pab6=C2XCgY6AZFayv_EzBFY-u@h0{OM*VXd?J{Fh zf_mg<2Lls@x98PuSA_4+3?sc@PPVvZLInOuDC<0Q#L9v8YAU#p)eCQrz4BHYE-oe3 zx(PN~bJ8k$aE(@!m%iaXW6mC~VSuji1C^(ZyU4-gsFar~dX4z?(rU0hKe!dX^DLO; z>Rxp1Iz=4KJ~y%oRK@Wu+Ze~~!D)?SSFo1%xNR>{%La`P6d9-0_ zHqGa0h@a5B#>?05tKSGdIWId(=TuqXpU8WBKQc+6IKZ>&O>`kMXY!5dB7QWSwEh98n;_(@pdtYnQ zx$aDuzs4i^%BPwy)2HLd6qp(6xtyI>pBB+SeT7T$HrV9L`}&y{s#4^gHtGsW9>1L0 zB|ait9*+5sgVO8^^l!VUp=6&t6>&a)X>^g{-pV<}pK&qU!na3x*lh}${p?g}h zG5j7!ptX|WtT4)}yZ^Lngb9y(_d4#St0GOu40HGn6N(3-qFYRchn#+>WQttbEaB9J zMbf!*dlKH;TPRvd7`J5eDlH?5+BgZlUwY=780r0( zbl(7ng0Cm2V6#wjhELBcU@xvTPU-q+>-%eKvDJ-TQOl-lZ8#O_)StQL$y@l^7d(z- zDki0C4WxBWDYD93xc0FLTYL0top*R{idN1CLV3K9_MSC`Q|aoxo7ZUTaC;~(x|lOJ zBEfbK)iZEG(VruxDoW}Fb=cb zCjx#$d;O|61Nv7(4<9ih3B)LwRrUIPH z0{Ca?L{Za~Jv6Dp1TtLaI-ijph$#;}yYEU+BC1;yQ0s>gn3ha)YfKtBb69A%*a8P3 z{UoF7ITae3*ydJ)Qwi}l@Jmz&K?KE-Lj_#GoF7h$g z0920b!>^ zP#eoP|CWP-5u*Lmw+>KrlaRLth2tx7-nzt>vha(mkP1r!POI(g!S7$(p4{CBajXX{ z`K5UN2WC5xrmRqOn4F>bdsX%5yk)%UZ$U16>J4W;wM@owE_HW5bpRx0q>5)M{CT zn$GT|ucUY-I1#HPE2f$=k&Qm*ru9hXdv9c(W~$qhb$3jc(~Mm2Q{qYA+LQyIkw`oC zAEFt1N5tQ!Um)Bzrk(vrJTJh?h9-WQWy|82EGS*Jpt}1qZNzdz_GgC{G*MzZUvDmQ z1a0gMwU64oY zlN}W8R-zI4-u~qq9Q4Tszz<@- z*FoM-nSqiaNDn8Cet5Yhksi5Vc~_`%;FrFc;-v zuTwh7F-J93_6}Ep3i#0#?lPyGsrxsS-I^ZbKTi<}sPcMKURsdqOG@?1CV}WZMfk(w zuMS^Aeau96d(V2QvpC1yi5ELbwNJ`6w-6FopIP0k=&mu_IT52=7qn6oAzT63&eE@q ztK9>$I; z_A@t|x6Wi;&gO@S8ZB4}Zf)$+(>0uEw)^ltvKJoRK2_YxUy=8F|AkDpb->-5tc9+s z_V`t|ny>8nsok*9F-7lX(l^AaM3Y_V3jLOSEaSS%18iCAas#z7^AVp&WcvcIu%};p zXXlDa9@sO2Mv9sGn%4Wy%g5a&j-uvLismKT2de_eb{r8|PY+{gvN)8VxT>P%iTlwj zu8N*S9?CA@kk{}ofroas=uWKZPCGsuH!5tpP~TXw@$&KGdXPkW1RcdCTqFo+=-Q&0_86TRoDj^d+CQ~5 z|2kVxj-1N6bt|S{-eJpeG3&)v)^R(DxWAlJmmD;<_1GLQ z2)q9NH5XkvCzW~fq};2BUV({9TzT)#(ZZd*-=g^i&6MH0)_dU>Cz*@+Udp=0I1&?i zIJ7XftK_!=4-@zR9$QVzI?r`2HaHONxlDd|BsZ$9IcKM16n-KeR7;H6d(O!e!||{d zt#IUv;r-d|shnaaC}|3|j#71dOsH@T(eMbPd`FPZT1T$J7&oop=GSsppEq8o?i*Gj z8G{i_nU{qm&kJ(PXEc=gk4tr2^*tjbHDi#T(e_%xr_DEO9T-KkKH z+f?_!X%5he$YhFb8uAaTT5{vYbB$0*7i!1uRnfwd%Cf@S<+pWzKZQ+qC^Tp&l_jZ1FN_}iFT_1LMyRvuP)zwKFclX=^C7reZi3+Zk8#ZSP3QK94XuW< z8~r>_Y&hj+hbLDSOdwmrrh4`4X5@|6L&>{>8Swe-DkgTMt{{47UCR{CK?*EXpJ~>K z@wzoQUTf-vC?4qdXg&5}xK2CL?bpZ}HEv1ibD4DA*;p%uue)Ns5;CSc$OmxZUX?YT z;gbv6^^fXeef%;#mfu(yYIaaRW4F$2L?@Q_HvWyx;=L%1(4#BiW;#8z4-90*42gw9 z6mY!*>^gfIK3#LZX=Z|4b8A*HUHl0>%^;Jb4!TI=^Q@b$PNR}v$82|lA8|%lk$O8k zaH3N3uyjjTy`{(_`aqCOYD3TV}gc}?vew;MB{mPIt*yGpk-NepH-%Mt(=omr?z<4+TE*Y*1_ zh6qY|c0BFzh&bfoH?a}8vsfUyo`Dj#X?d>3<(s4$VsfZWMKB#U*)m4wi;0L|(=iy9 zU5YG46M`;e*Kq!Ys*pM<-OIg-eY4Pn^Q1FfbT5CoX`gSmG!f6_0LQf!zoZ6@dx3fQ z#A4Wq6WbHhdARgRc(_D$9H+~P=h<%hR}pz$eSbw#@ibnu#o)cSB2@H&$KY$V`@J_> z*`1cz=ou-u-|u+lcF%>!Y_Uo0_q$hOg;8QC5^sPe7wex_YeLUnt; z_CRZ_Xz`uVJ|fgx)l`xg4HH zsgHfRaw2{+hc`=T`}*|w{_ATz`83x{aG;?5ZY^$|)wOYLbmlPCC-)HR8I`^+X{)>57n6@eH>Ab*>JV{lS-U~z|_#Y}Jzx2U)%A0tbq!+kLk!O%xLB{%ob zuI9AOu8s{hm9b>@H-AK;CGwlEr9BfRMoI0Y*xxIlm6#pMUXw{~i(1df-QJcf#-ZFT zo(-54J!ZLo@)-Y%GghF&wsr#X6Rz9d)zoR@0WtJ>58Kq0x8^l!BKWf})%9fZdo46M z`s!-nKna)DE-#YC6U{0qIWtj5js~yWnm>==jL+MrD3E${=cYzy6nf38M{&8kqH!#} zAGm`^Y|(VqNRDTEBh*XzWBtEKq4hdhZu>}P3ZSo%8Z8+_`mQ_)qJ2c|U%T5|Y?rio zU2A{uGh}FbOs;^AcP(|Egk(6kW}{e{J9#>GB^AHEevA1NS5qsJ7f0Y?Ia+VGcl?nr z<|I)s)dH@{hFJL3@WaRcV`C9u9Q{rIeWFMVa_0MUMe%IQ1X9%%AsiyPQAazSZ7Gqj zEI5~$wgZNh5+4k*EIT=Em0~sc@|Et3?)zyFlrwMp1+go)PV2ZfrKx*3f8Nx8!}@`W zQv1f_hUC)A$95IgEXvZOvq|0dGVSB2(z)pwUiH7G|e^Xg`5xkXx&iWgj*)HqyFu 
zFtpWTp(%Sq4l#ivESW>kWUI>Ro#c`cL8Fe^i66Ni$<&SOT6TyhXpjS6&O>!tV=?!c zF{|E}@2f}R*W2C78#p<5cRAzELXZZ8O^V)TeF&AC!dah93ih(icbm@?6fkBABN8JU zl!@;hB)cn3EL)qc0ejD^ek##aS7vd=3=4LyfDg5q7LfWVy3S`A7vk@D?l`h zXzsLCFHOZ(6S5ctemcKdgiQSTo>&q7*QcU8&0@00gltxHj^Zh-QlA1UA7GLi=O}-4 z7;>Z~O(C>Al=0hsDq7^FrB91P`~qXRq~eN3gWR@Ji8~o2a34LOVvpd-==P)4zGbiT zolJ_*Amn>m_A7t?d@q?^C5)v^?vXn{?D3M9uke`cSC8)FXWjS_`NF*C%s!X!6IyCVL(0+YKfGG4 z%&&f5y)YbE6QDZB$+6`!H}w1&@tZ7Ms;grf(nUJ=&R?jB-Lsn8`gnCC%%s`V=29pj zns12mb^|VPf6fG5*9y4t|`GDkdWnkDOoe+mZ-XVSIX4pvcX?%9# zowFMQ>6YTWscrHx{q1w?foD1Mn^SSEpYPblWaJ)|L)7PNNNp?xW0Var@2VvhJ#Fg_ zIIL8~oC|(soR7t<-crW!)b$xaFeENp^XIooVkW@ytVob7J;KV@l|s#L_-Vjtc#yx)y@Em%%9l2$>z_NCgM zPZHwuXILov`1J~NA0*|YMrgIa_BbYMWIXokT-hz4LP*wdSaHhHZ9INLiCv1&qw&UZ z-#>CgxEJS1z|*Zrg}V1Pd>v?~JvQ6;nSTk@NY+*B@k8JQIy{~rJ= zK-9l-CK@T`y~|u?-bl86`1ctlJCalf{7X~Qkr>YRZs$)fB)$^gbztp1;*UraWL0iM zctW(v;_x+-8e2J!XPN~o3KKO7~l7%->6fN#DQC&IfW+MhR(XYH8gzv{l~kA zsNat&1bejr)MfYkD&N;bt;t}>K>jjn_A|%%rk_AHiJ$t4$uKHMX|6nE--n9$nNKePduVgff5~l@3S=TQJkUB8QjiU7>%J0#zE&ZdmVrCU z^D{EkxX$sZkRgp+F;kz2Nq0s4JXiYWGbRR^X^z2_kiaXwSXs8zSN1-z0Mxli1GHSDV6ox&x(a2K}TTz*b z53-D^b}p^>w0K*QcC!$z3lv}YHxkg6(vw;ntCAVCvvC>7)OlkTRDB&8>1j1J7WR`V@=&__Zdr1Mj>=*XZ=+ zlb_4cIj!<}mN5vOj>{@#W-jQ+3wXyMIDq!fXLn6g8_<@Yc=6e$E?U1zo#3(HL@SGY znn9aCK5E;h<*oDM!$Z;avrBGhn%+YvEBh6V5*zY5>3dP{=GQ;`(Gazre^wnz6H!Bz zy60uq4OF#gdfxkNi1OEm4$}9f;JvE&S%)qQ6l)0YoaWF(k&>F`NvUq+{RyRI7r295 zsg;?c7&7Fjzgn@QA4YcG^i$@l$`X^AYAb*e5|W3an_JFKNW5BfGDTd>%wQgJDZ{gBd!-W4@a00J?`V zvzUGT(0ilEm0DsBeF6?m2V3>fFR89|p|KSG&I>2($5%y`}bUL!^dsT@3P$T(EL?SeMK??jpKJ7-BEvx`qCrpTq{AS z6Oq0{R-}WfeBlF;wSuU)&>K~=Dv0t6eHP|(`%${`Ibo*kGm6$8>C%aCqu>uI&HdaD z$RFT(8Mje`eA=*QKI#R?TbEV6ctMmHfBulY8di%`xwu0y`-8W5kBR3VKa_H23+q($ z2{tfI;PFUN-s9wD_*|I!!a%bhkrCf?qNN*=JV>rMMiPn~Uh~sE-Oq@AD5r@dE>NFNkSpvRY9hP02zHN!~ z96J z-${&g^6C>pR;VKucdx*?9U5=8KV&W+>$Dv)Rh_j1bqYxP)XeF@nS}zH#uHgD_MpNn`oa09L_4^-{Z^*< zX?%Fwkz5!-RnGseDk{nN<8B4~ntdIEQ(IkwB-==jI zn#RcA9P(`Y1dJA+A21f5!)Sk1zu+H!jQ%ofe-B!;fELVZs0v-J!zUm(XYBphXp4kDeBT z0TKVh=(at2!p-w0x)?>fZW8Tt$EzE9S)Wd#oo&qf(sfp}h8y_?|0uwxLkSI`_Kav^ zA#110phTUo@?Cq2=R~__Iz7W4kD88)XQXa=po)_G%j*0)RJbg^tG zd!p`{+{TMC1?5CJb`=yaiQGIvc?da9Lj4cJx7OJsnSYYv>1%~8J($jlIqK(`!*%b@ zy!5_v2zpON;kNb*2@*>e9;+Tg*5Ex=X1CpVAO7X&L465Srzvx~$=pPf%xqbd4sm>) zh-cbn-HNWbpFUOY!|0FY(d7Gi3Pa0TbI>9C;)nFdoF#YdScG zvC3^cj3=ux+Ra(eJVVrL#sT@~c77P{4SVluVToaKt~VbG|6u6YgX@xe74i8@k9zS@ z74*NF{;DEy1idqLC1>qT&~r%7+~t%ry7W{;k2W@;BUrC*?T#?o74}sWIK0KjD&<2n zCA4Vr@ocBLY=cJE_Ih6e9U2}V7&m)#4z;IyG+wRrquO~nE1i55RbCgBU43*=Ie6LT z;`b(0R;L`9>77Nn=k_wIJB7$^+DzUNZL!6Bs!Y+_}lv)Dl{FKx~`hZzo3Zez`weSGA8)yrz}Ao zHiMa0r5p4Q+%Z$oP~I~UjhO*o4;o@zH*+=R;564WOmAG?T+!{qv{@->1kwIXnQMHe zsA0pTobD!*L@>secxjzEj$>>g;;2xQHbxf$vKC)wV5F@#ww(DbhU0%;SB&0|FB)xU z*2Baxs4vno?s5eK0#lVnE0*ZHa&e*c{#o?UrR^>Ld>mbYyWV>zwWEFSS$aJuZG7U& z+{-%s0xgTG54C4M;6s7?Wt+$xG<^6=H?C@ix}HDNUvC?sHb;^yckvf$Mo+bKz9-uK zip%@I2#BFduOabzbqtCFe%zXs|L=Gq^C@K&FFo;RkN?$R<2jklZgUo1W30xmVZM07 z_r&a7dm+*T{+(lBSAhz}ff393e6Xm7XZX{GT*uM=BcgmfN; zMx(x*sS3vEdZ#Bp!#E~3yIGy)tT7#v=+Mw?i`jH5!NnPI%-sxm(w^*u@5bh|lWN@f z5&E{~=lgy5QK6Of*QXahCjRaXwhO_J!zFCLMYdrsJ|%2_lP12cB%Qdmse;*R=}MMG z8q8RHC%b&+4yK#D_;;m^VJb+0Iru1XUTNL`Rr2F6aUPlO8&leeu~<%*OL{UGb*$l5 z)qYHTzR`83|MJB!jn49wUmF-qA}~@rtzqE6pXk)2Wc10k|5V{`L)VqFhp&r{pt7OLw? 
z?F_@P(9_bwEO!SBGs`igr7>8r62GNMC5(BWgb7~a`nCDvDoKa>{@|za)9vjyzu||# z+56+?sWB(|pgo#&CuY0+CwIx0V@9{RjN=v~Q9rEo`kEVjt*m7 z<-lUc??V{vIo7gjScnllwm;x=A% zI*@;_&uT$O_*tS-46p4C$x$SLFkvFxp zIiYb1Rrdy`YIj87gYY?mxyn&=9;D_m>AQmdZohA)1QCo_W?i|x`wAvjQz5qa5i_fC zKKs`4@Z)lKL1~@==B@b~&iXvUVqiqbpUrVBr8We05!b6N+bsJ8%xGgpQ1cCElo3|s zUVdSPUh!$m!>_3_oKD!!~2m>M=~XO*A9 zB%_+KCp$UD)rJGyi0hU{LsWO`rm$f|;n|HcgCqF-mnYKPcAn@rw;i!}okP!{*VUO- zd35)XQM22oqa#_aEQg~3?X)qHCqDf~+kvl_>|}h=YDCd3YV`=MH$_B8r{nRV%Ii?^ z)Bm^ocl&A#;k1Ca5 z!ROo#C)*w@l{f`)C+x>6q^=K$tYgiCoV zznwxaWWCA3uMdsi{eDDY&5eXb#^DN9C)EbJUBa<4aF68ckT8}DPaQVeT*H#o6f>36 z4=jq*#r`TIU;#eSz1q7le^AYIF{l|odU7Spd5>W3J8x0pWns+z95?6Pc^NBf##mc^wbXs=)Hl+klXYmgkgYFJP|u$z&W zvU!X5QkM!N@yUoS?9MaFbmP1ZA`5SJAuy;Oc5cmW*DPap1xjgis_1=UiaD}{9v=i`xJdF?)iI|Wq=(kG_2}Zdg`$j z!T#pq;RdYxiTEe6J;%oG&OnpPE7&~LqTuYyj=!A3E$)4k_{-;)8#K;& zOau$Q^0#?imfwRZ$D6~?J9RM@zEDC#_5#E0-DR3xJ2AZ5^m^dKEDU8{{Y4V~0{se_ zU3>)==slG&^Ha?l-R+P6dS^(ZtFL2Vx}X|e5iB1+OdUeU?xGo%mn3M`{MlN#`gV)= zidjCN-aRk2@QdK^%g>3w!-=Vl@YqGHXXda^E;wKPj;buj9NNc+@rm2AJtchwz2v{S zpMIgi2o0Xm-m}1D)*7vw!4T%m{|bAH7h=INDDMr6AyyJNX(%Ia;P>$ovFlFj*x<-F zN_wBk2 zAf55@zWL$^f#k2mF=mrH1QO5f>o1c0@b{J1Nh7M4*qB#peHodH^(CX`zgI)>+x`eu z((4!aMas;c&K-hPodoML8GbAmamKBxhG0pHIh*-W4Hni*Qo3entM;l|-V)VU6#u)WfB+oY}!AQ>Su2YMbFr=Un zm-cN21I_2}DiG_d{k)x`+7-Ld$9S+tx?>u>bqBV88yG`Z+efm5l3n<8x39)C zYT2t@+7fD8*tg{qLyrs}5^mU^32qTW{z&oJghMT;lU$bUejSSTu+oYwrwV)ycMg0R zd<5enJ$fcWJ210u%p`rf9`lbo#wAofV|j5e?Tqbt{O&uS{7%;mo2!v`-Nus$qyo#X z5nPD`axsI$MKAvlC?sSYNOQCZ6r0md2X}=MD6<34?I_zMP-ft!dXG4PGHrV8kK$tj z<<3=4UK}NmFFvpMs1ZdV*A`rQl_y3ZbC~|aspUW*b@hMKb~%bbLj2y2e*SE&AyD1G56ib23|{v!!E93A{gdXFH6*SrsHFdirP6p`5|=joCVyEX3_1L5=vK)B6M^h*PfKKX4uL}Z zncqd8lLV?N3IjF&B?9&Afc|puGs3o_qs6g?e+b*%%%XQlT_tRP_}1Iz#Vf*g%k#<= zVFrY4X|X%L5mpJ*s;bR*9q$vU(w;sN^5`N^h7F&JxUNi~uw0aIjV2?I??@6VkPjn} zZrG=s-rh_gS)4MolcmAmgxH8ueJX579oxCMHw5d}Gz$2gg^JYN`7-7|{C6S~j<_&~ii++~E@IDpCP7^+S7QBCQoYWYdSK~X) zas#(`Z{V6g@bb=i|#bLBK>CLxEN~!Dntrr#qtgCDaFulG?WtkO1qMnj%r~1 zYJ_DnG4K2B^|0?9xnwN-rHdFbA==SbHLnj{--peHT&8Etj0CcV1kdl}@&wAL{L5sU z_Xyi|JW~GR^MOFq;ywJm@)cp{@!i=2?oSE3b{W`nsQxdat}~wN_j}uW@4ffloBP=kA2y|@4S|KR8Q$m@FMb)C=qzR$VNbDmZT4&MDnMj!-~&A$zW?8Ja#&?~AcLjuT8C_lN7`V7c?7~%%_5`k2_5tk?7 z2@toGr|09T0TKTft=_R3AQ%l@ctTl|EpIy?pQ(zhn(t3NxUoqj42!Z3?u!f$LFFpNTeuH1CYsdZPns$!Xq{WWbY_ z`8u77@gkZaD5x0@r2c_2KG&6j!c%xFw+aE8uoJUp+c$uggq@o4F#_6$ ziSt@GSAf>%nzy~$DWG-SuPz;q1DbA@Np|XXpl(&(<1Co~s_(symS5t46486ovF`zS z3VDiy@+OdG+1+zUaRL%?-IdnCG9day*5;b(4TJ*5W>V2ffFJS1oXu?n@U-aKzjvMm zoN>xO@5zbK;Xa!xj`tecH&BN^B@4gjL=!vVW87HZZG8L&e}-cO8jqBv z^eVKYv7R~S7gP+CTkNiKx6c60>nYrZTuq?6lY952br#TDj+@n&P5}K}oxzopW55vb zhk8RI8W?OGJzUb$fI;U1>z*JE&_5GOyDC!ybiSM7&!t;|wts=HLL(4pmPI8Z=O}@C z`5?J~@-k3nh}yYW3jp~rS<_!40U#6R=jxRo1CrC`Od`MRfyl-8?17dK5X`<)xTd`a z_~thP7LvySw`7gxu;D)7h+XU!5o1FKpwH&daDaBVv^QgzuzTd9yLR)W}P?U?wxpnrpt3fhp*?MDc_YmPg#C6MO>># zsfLHf`2u?x6Mv$?f@=qAY(&Srw?BU}A;Btjj5AOl?z4~@tgmBH!M8_@TCQaa-sYo2 z1F^WDhvx;+B(CoZo0ciG+!cSBXlw&*eLf(Xaea-BuAG`_W;6r><#h$Yf)_xl=_WeS zE(uh*Wj7)LY_+u%$hna9?z1X`+CZH9INpdrv1Z!0YUDqLE_6LQQz-j`z7`3bvk()nA=HRgeMjI*plQ2~g^ zn4`NBl7T?j@I>~NKESh8i1vHI0XRtxWrDoF(c#0=<|l1>Xvg5St4dKmR&SnY{TF%) zt*eE9&BFHatSQ>eOgN9BW#jAKdk<;Q!fQsi#*+k?ZqsZ|-jRi7)&3Pc?;}Ao#rIw^ zB)>tEH+t+O8RyZz$ymFId)mjmcSe3>A3wu$jNea(dA${sKvni4y^pOdQJZe(iqq;n z^zY7i_19YhXqF`PQ%;)Gdy;8;#lpalP(?HPjTM;cV`<*kwE(M9*=%3x8nB&Xe{0Of2ka!TGniN|0ek5L zNz{=yu-}Z2up1KucJ;=g)gw<})0|gP=#K}Mf;UgXSAGHWclGBE2WG$&bViW+J`FGm z3bPJvCIfveUwPxp>p*9oS>V_v2eblDA1X5}0(JfQ+_CFYKq=Gk_@W^lkc)0d2~Jx8 zDgGxfvjuWMl(^#s;ehW&pgZfboUeV8B)UQ>;<6i}r77aE9JLkG9#_>^34D;#EcqqIP$-Gpx}311q1N 
zNLMudXZSjc#3?lLw*3S3zkp-jyW>q=KkpqJ<6L!l0deVDsE+vk(&^_HQK!K_&rl{G zH2T^>n))p}TCA4o`KsWG^ zY1I@2?y{%zBXfGd?J|r{FB%VAs(~li2;za`%9bdl+Y+#!AfNpFfge~KgSwd}x`9Qe zwsY`&Auw4)zdLMi0fxy>Ge*+|KpztAsrb1-|ykprgUg+g1FrXm{0XB+ePj zvy5{;?k|O)t&QdBwYMwS_whtZ;ENerwLGI3ZG0N7(DzC?X*@#9T(jeT?we>S@P0YY z=NUBTc#bRdJ)kk?Vdgh@RmZ&de!Q$SI#+v)KZpr_Z@~1{23!4K?mRNXr;{RXg@Lztr&`X#B^_? zMO^cxq@s&xLfNg~$BO!x_x{0W#jBQH$GBLG?G@XoENV8E(T*O*M+2*egWUM-Xl7w* zCz4bEZJf1}*v{+$9EGwu-sk_YJbtqFj0M1)o$zi^Lenu`|;2pjkZTHLz_;Ww^XVrUxfV+svWo=6k_*=dkO!yN7^?QmvT3&&` zYIwg!;0qAQ-JA=@dkg$+bMclealjYN(or3K8F=g5su!=_1D?`DrZz8n;MT^EOr$&u z9JQoz6rC%;)+$4B$tE3GrbX-rFJAzr)zSPf!CJtunGk43$qMu?y-~^Z=Yh7KIY08z z9iZWDl-_L&1xjiBE#;=$K&Idj#xJ}B#Nz|Ci^S!CpY5-N$v1!#>$Yx6PlpcYyZwc9 zaL|Eg{BS~gc# zCyYr%Qn*pno8<0bm}Pw3w{R$cV(dxT?}>`AnEKklC> z?FF<$TqH9?FbR13TM??=4}mO9lWneD6KEY|$mL!$0+aHE4lg@LV817?S3l7LJTWw- z^}iQ^@8^hrjja<1yu-)Q8{P(?Y!%iPEH|s9gX7?r%$jG`E0nk|?#NpcQyCY}MV)(F2d{ zzr%Mjy}-q_y*FGs0_;nS%n6{_BXcQG-Uc%3yNVAOrGYTB#a7hO8SvzJ$ji9ZusS4`Q+Qbq zaB%8)2PZH+vg^X9%`^qHt=yNt`Kb)ki3uBJy>!q9LGAUY;^}DJb^n=XB{N#d={^1X z^J_FcdXGwZ?LXiBU$xSMU?R`sI8?L|Kl*u*8@0cx<7d5JhDM7|pV&)KMJqFsza6~n zuzFOfEUnfVh-7h&uAXZLsyV&|PTL>ApqBD*vA7>tf0P9cF8>AYLEddk@dV)K&Q~+3 z$N<4Y8OrNJyC7W4`r*#N5QyQ1TbaLI0H;>w~y zbks*6R;@<=hb$RH9|k0FwaJ3;&&0S4vp5j?c>0amc?J;FlRM)P_yqWWW&FMGP6m8} zlZF!e2Efx({kBIy3Aja;o|y!A0LSxXX{I^{>m|qZtZLee z(iv#Bpq%mSzyEvz!;((fUOxZjt<`VpU;7HSp{@$v%#K}zCN`GyPEKKU z%k_IB=hYn`9U@aMjgbc0?e%yy`eb0f%`$8{c?6t&>Ug7+I>6_s5GmBl0z!ljba8^F zKx9T`qI^OS#8qFr(wH9~{OB&P;9|CvArODUzvA|I>VfEl&)J7Y0wV~~ z{B_zae3wVZzyA^0b3bP;{a0VtrJj^t4z)#n78~z(|LI|RO4zr%a&&0>zR#shFBZUG z;UA*0;s7ejAl*EzFTl__NSeIo1ngYM^k5(bcohnjdVU^)pz!^I%a`*(R3}hi#&{Ye z?%5KsGxmZsoz~44@2`T)bZ^AFb1Wcxm94|v&lhCVAVCJxEo56w39In!K=xuiB{oP3 zGCQ1fboU>C4DU{nRF*49t|YLT<%ojB)mxX;{N_L`nE#g*(YFzTtj<{dap7l_?LV|g zyq$has^&Njqx?3$td{e~xM4(1Ze8;T4Tk3Bl-Hd@i!?v=6?E>Q{YT&L#y%|sVi8+m zJCifm@8uM)(4;7^P=-4Wo~Q$EwY{mna%15CrYX=xZUG`1+{)o!Xh32>y(|99N03pk z`c6(83$pKTNC#QVg1pelRORgxApicuV@mV)ApfU3rBz=TlJL; zfl$TuLFpHb*!^$cv@E^_{P?!v*dRCXihVepoB_anzcpNabPYIs9{0U?(FCmVv_B2{ zUIEjH!qrKx|F7qD@*|l?fX=jLfd7dLP>xIp*t4Ahl935f=lN_P8h2Z{P<96hLuK^I z>RSP~w>f~-QwCeM{KBGwCkh={oPh5{xoA(jF}mZY71}k5_4VSfMVqlxKW<4)prw#- zqwU6^W8Oy(9{KWl=^x|fci+fFf0Ux(tX?9yv>Rw8EJ3Eq0S|DV9h7&d%47Om+)E=X zGGJibyCNkCz@F!4F&8@ryfb0+k*oqB^pbLb;3+SNzg;|)xj_Ij&JKraf9XNa`O`W! 
z90v01_bWTTvV&rLZXB(^Yfxe^o)il?3rgBzle?p3pmd?|lTqCeC}uV%>XB}NBE4u# zbIKaXFQ)srOpb!wwN`sqJs*&@v1m&EE(J1Nr#J6dCxK)&`<--qPY^o_I~CrM03xlm z^AUqvAXF-osKX};0_mHi1cjQwmyp5rD$oUZ3Ew2lu~w`juNQk+WlDg=msjStdr$tYM|1I?{`Xp6O?y9Rn)JX1!eu)*j|Nl zP+apqUlMPe8H@oL1y;Vb*TEoZfm^0d9spwhLT;LO znSsbdf_4d-n;@jAbF1!={FDwsQzI}9{}1~3x%Uw4#4Im zxfLEc27JsU4-(9%L0DIG(vdw9Bqdsh=ghI+YsJ8I3zwcdl`bB2d2_oMSYNcKs8RG)(6t>@@K!$&w|9X?e?pW zRv>0(+hzGE1%#WQ%`g3Z4T7gKJ1tn(f&a!(B;N=*@H%>F8zd6}H#z9j)64>Uyh~+^ z-x#ns2^s3+KLEy3uR_t85}`WT_s0u#Fc)yq#XlVxdc|Vd$^$k#V{CrL1mmjDeQ1MEW}bWg4CZMR4Km$ zK?0{l-|`bLh-w8jg`Qc%>X7&c&02dP5U#W#cSRNWPF)q5$8!XplR?%wOVz+JI-POC z&kk7cl{>A<^#ju@yV#Vlc}%amY-nb64XD&(1(|#(fZXkq(J#RfAboekeeqTi5EA;> z=q8x~UL8kEo(k6II2ix>E>H;Tv$ISGKfi(wM)(CDVtZY8Jgkh~ioHN9$*GrRmRXMD zFhQ)Cc_Sn2823K1Iv}~qi59uP+_j9ULr2=e725q-K*`>mm2S@n%)D<$9hMD%mzCzE zs7?h4E4J2KRFZ(?>ol^0lj$Jmb+g{p@EIuSEYOaBNd`5NO0Y_!2936ucaZ}3K&zMb z-c9Xlfa@=G(r4}f#J8pqzD59{ngtQg9{?_e`tQ4^S}YOW%At2V|NWbW6H&K?|WY_ z;Io%@J9Oh?{a@jik`=V*D3w7(;L3S)DBDE5!bgvG-d!Cld&-MeN^yI?P52-4KEW1J z<8#a682^zY7B)$+Lo26E_Nd940e6h#90gq+(8T}zJKawLY)b!%M|W3%|D;?E{YE{A zG4ft>&J_Zgr|+0;syngYr|1kCYXp_De)^n%HPHBBCiozJ9?(~Z!Sj=|0P}8q&n&4y zdogzPdvZDGkjHt3YxRKktl6O`TPSFMC{Zlw8wBVLe0lBSM*#kcYhAV_pv5eztHs?3 z>eqTUAD(&)s@I$TRNCWV_s7GJU0A-LC`s~CRk$7G*52K92nYsQ;rEx9k8nY15nHs$ z{RYI#Q!VQlwm~E{hE3J46@*TiF5X?af#r7#?c=k*fH%~pqwyN1e~pcCgpLaUyC^%| zxR5n4k6RNIzg7eKQ)S8diqk+{@HoUC>#tLUk<*3_RRh@z>(WM#Od!n3&UJLSj`iJ_ z&UiDO0$dx@hIx|;bW|3vRa=L}LH+VcmYP_!^Exo)Hg6(YdGFI7(KLO``}m&vnyPx% zG5%*{DpZw}jn;mvsd;hY0>PEg_<>MGp!>ny{J^vqI3EQZeD~x4!E6Gq7i5_rDM09Q zqUaUK-B|t7mZt_vjS6^tUtWQ_LDQ%0h5LX~r?;%>{6O1=g^GgT3v|3_{KQSHLD#$L zlwrCJ=mxcaG-)aXUE}Y3y_vJ1vqPz!A}|5k2Tp;M;>4gWIe?SM#{_7;C+Vj$J7^sI zq#CV}0=32p;VF&@Q1Moo5^l{0B}tm#S$PkTZ$Dp_HgM^C- z2lOWJMotOogI+S*UJ%15=t=R#FrjIsgf@Jybo&-ww-JdotEjh$;w6tVJp zlAykk$NAuIHK=-j*v=}e0%g4UkGGi%LBZGlZiCiyEWedCz3?>=Wc=5*u8W-j$xCgo zbM3x>=mMXUgBvFZ+wsihc=Lcj9E;7q&;{V*^M9%A7zo@w*E2G5^?-eZygo^v5?IU{ zdsoUDfj*&Gne<95Q12Dq3p3ULikL4w{`OWt_Vh*H=BpMU45rWZpbQ6m)`mH6Q#`<> z6PtO#6^V{;lnOX3YtVrw*TIRrd9>XrQOKo-eGeq=H95V}$Gng8HSwK!=J+4Jv4Quz zbQ0|(xZfCiR0U+T(%gy!l)zkL_CcHHBJkbX3Ei<-2C&j;YN>&qB zmC{N<-R|;0u*G`-Xcy%Q@c^Biv&MWf4WM`GgJKCy5$HR0vPODc0s{-3o=@dAV4#pa z^=A@5|JS~xMD9z_H<;v=Y)%0^oJ$l2IC-G6e&o*BLk8MGjfoWAWPqO1^bp`*1C6=+ z4{YkN??O}EjB1ewl;0~%EAx?p!j=fr^O;?cYhmj$*);>1Z#!!w|9?+3xi{3rM}e5q zNX(b)3n2WA_|5C9-k6?y@v3G!miI}St=)}C1@6AVWOKrLVDGBhWXp;L7SVs=a=A`G zf8)xJIe)DGniXR=Bhm~M{I_2`xHJf4BAe=(q;x>Y6#Ve}4)Rf>kueRKpkig)_$_n@ zGz)xRhbOy&c9iIE(!CPUBh!DGUC0FbMHjQ;Cv(8yn|84dM-mvm8C;`|V**2m6g8T! 
zMPM+J$T~qa1O}=bCVwuffnKZkZ#_Q>&`md_irses9V&Un6oowi4Le)f+N+?MQq7se zRtRcMy)oiLE}%kf>}hfP02Fm3H$#3ggWSP*T=_Z;$c$*Z5{QI?K#dnNRzpn9t+h{2&_*VYJ<73`OBZU=C5=ppi^2xUdBJ=4#i~ug`gx>QRki<+zDCyMz{@dga1KKA6*UcPXUjz>wHuwn{?RTKPydHPF z_ztuy%*phOHWzLDwdtBj|A-d)vou$J{kJD^Bp|mvXM5&24hv4qoFk)AfJc;5?v9fH zbd>DIJhoWf$$nQxd;rsp!q{=r%K|_)IPj}Qwg)Ip-BT7b?*Uu=h?4Xf)RU$eichP7*ogL;?lN(@uwq)nWhe1AXFfF z|B8Ah@MoC^U378=UUB-NbFJ#YIp)DSIkX0>TOZy%MZUnKq?7o@OdDt$=ba5sUj&Np z_M=BpIY6r6-uNKj9f+^|IFOcq0(j|)(T!ZIfD@trw(@HrI-tDP`TnUf+9iLo5LIk| zwx7|+T1|4J^}AfRJe()d++17b>^a(F-iJ@l)@AerALHe#zfWqv4x*Fd91|93+1U zhCR36Xg|*e{oOk(%Y&~$&p*0^hJPD$UJq#WTWbM4DU@{L@Bqyg9p`^06QGvw$C^E( z4a%Q1zGeodfI{V1P>_NU$enNE3^8p38Me=SmwoL(0u}vu-GSw;u_7!KX0;&rFVZLI zx-jsKx*F6!T?C$62{@OU{eUCsK~=j!1F*D~Zxxor10&1unTHkjK=Y^JPFLqwAlD2E za;r=R;>IwSB;s%&!s$<5(`N(R+nm)sUH{PG_7jB>Z0>81c{^dqt_y7|aU-L+HMH6O zqu6|62d%ET{wO(Zi)LEg??jCL*BAXuWWo~8EPfn^RcRNi>j#*wyK?p#$%qoB%R8(E zEW84K;x`%xltY+)?cxp=>7c-H<(lo$Pf%SEO@8K>11NgMnRDz8=(KZ-C7pZ*`i*Da zI{#|`qci6!PH18LSIb{Kint0U!H3_x98ZBsAG3g$%_}e=7coleD8S;R@8Nt#0t^RB z?K5kML4WkZhYy73K<|XVj)oXJ=v=X+zGVLdz(hisy0RNIh0+!>U9&(9|Mc*WSR7C$ zwQoMk?f?ZkNp~%xB9JZktWXd!4^k`nrxQcAL7Xz`bM~svZ^ERGugiZNow9bF5^} zW4UO>w31gi^b(qiv^#UH2ab9FbFw*>Yn%HRuM_S*HoxQp#4Dfb_*?jZ*}ry;d`}DO zBY%!^GOq@y^9ea;C&@uk`Ava$@px8ZnW^c|_o7xjZl>tMQlP&k-uDp?R@n1Ttuc-wjo4j4^5u4Gg81j8i1f$Z)& z&<{5$EjOqI-9g62a#O7Sm}xmP!^{mRZTk)$yB=r=lQjP8@dZ^<$rq&pn9jk}-|eOD z0P+&uBV^r`Ak%Gi@;&Dyruzlk=cV<5XdJ!w_^vMqF>`9w?pFYRd_DQ|wnE@(pvX-) z_X9Xtx){0O3$QA1$KAtp4#s9LZ65i2pfkI2|G>TgC?(ylF73(!aZnhku$?jxoG#9J zpi2jMH4k8YIt?8JUffnb6i2&~Swi)OfHvQ^J-+`@5Uq3G%TytxK&x|2o}c*?(2`h= zX8m3OnvnFj4-{)U=DjcIT$Wcx%Q4;G7-#-HLxqgM$Rq8G4>JcC5)Y?c;dcVP(Hg4D z+rL3q;y1Qe!3nfg?JB4;96`$@Du%XN0@T`;7T2YuK>6D;kCZcZ9*zBQ-qg7aaq_%)?XJ>VUD+35bsFEwatjGGNQ4 zD9qP^^Tn&AhM=@}QRCuf6=;&1)2W|aB-eo2N&{L%l!PAHYgRKuc{^smBT6?>H9Ypo5pUogb5FBY&Ms)eW_ai(?*c0ntKXsq#m>~cTv1DU*dPZO| z4-#w0xeScN?sQVUX+X!PX8pSUJW$cm=?`gM2IArLp-7)MfG^2b?W3OsIIf}tQwPt` zp@y59h;Av`iXaRA@z4aVFDGcd{M&+7M&?Ib&m^Fwc$-LvSZtmmTc$}BFBnZrNm{0> zuc2Yrii_1^_Q$+;OcvWm82k_7pq)^*N}FgepnQJrivI8vaGa3pC1Vc;;n5a5_osJ2 zR^{36Tzag2eL#c9+Jq ztAREr-Fnt3OpguNvM4LO18NCxKlG$efwEq@KL4IBDEOByd=<+A*`c==d3$s~Dwomp zuM01TWmvF8tQmpOyrx{nk96Q~rn39(dK!3^ukKtl;swrMN*d~a-vX=JBgczfSl_4S zc_j~S1kg2IVmbdI256)&4v67R0@<4=5J!j5#r@J~%?-Lc0^3tF+5`$$a)7*U=7{gmP!7^E}b(v9~AeU+m^TFgJ8`?i_QJW~U-16=~r zd*T2+yEa)sC|9||>)jaf3;}vx z(y(d6H$Zb=JVZ(D0#JC3zxk|>={oYSf5#ed0-jC9idu3JI`G23DNs|2wl@zp2Kw62 z<{S@Qmiri5c_l!2^T8#w6nWOpF@zq?2C)@!zT`zyhDMn^?@yzNUh8lxJa06TpjX#b z$c%a?o`_ye`gRir~m^3G*->-Agp7v|GK4&7F(|o+KezTSTLJc}jNN*xbClfs`(b zHR}BH9Pa5L`J&}IgJ=$Vr_AEmuO zHa&v(UGq99yJo)jXbX8wQtoi)|?M{nXV~Q65hTo$sGwmLT&?_oKw`w@QE8Vd|$!*WW(Au^N&q|%^|vK&H4>6IeJVaIYa_|9NVYyc`Bf}ln}3NmID-2 zsSB6t{sM_&(bLp`zd$fqcm6w@KHwCW;Fr}z-jFpKCb6kEd0c@0;?$NZdD@}gAB1of zuN<|>@u|uC{--b1)e?AD-b^}0A$NE`-MKTzsW1aG$km{6%R6qGDS^&G{EV z_LNok^zb1leZ3Y(D5eAI0TK^J=Q{u?duPRkZh>}kwf!y0y!@WN87Q^EIO|M!FibHHQTD{iy&o7j1u1YX^iygSVW<{XibqM7kRW%2t{-@(`dz3CAInwDOwb8~obhjYOV zHBVlXxpe-+G4EAcoa?iC|KX!o3)XmlSbe&^$hf{PjJLrpyexnK?$$gcd=K z5ok3GjE_b+c1H4QftGx>VPLTtxFR%S$cykn_&2p*d9y(Po04he+ zHho+`6m8+tf5kgz!B`Kur9%vY%$OR4loN+DHxu6XOZcPtRgf*#$e)SmvI~G9`ej85Ck= z4F2nj7FAxhR_N&XFAfz2;tW)|KvTfnZ6AaSoVV3`K8ge8OQMm=CS(Q4TIJHP)Vm

lBQv$5$r9th%V&+`AD`<%S(DNb1?vLIdPlhM=LA@b*xJRl6 z)G{#-#nBH?3Ao^!ikk+C%O4po`#PeTFZ350%4z=LJcC^wN13mj9s`1eNvA) zvZ_5U&xvAtX-=Hmj+aI)my&c@)e=$Di`+`ivrVW`*|z0Po&c)ZYj^uNRD?e3hktdq zQ8?y3uQiyjg-iDsAMt2(Ok}bE4ee8HZMraE-%6WUam)ij#;j-W!-GKlMBcr#Thdr- zks3-q<$`>%O6fP;&!Bil#OVDj0VqqH85s6s1Qp9uG=%;aK_#!5kR_cAlzp{&J2=QO zpV-QlJ$BzK7#lx*o7M($IEU%VN9-V@bcQm~=_5!4cvYJuV&}dHu!L|ggFxNV$7eOf zz&m)+z2xdY?A+1zm!SU%Ed4y5FYKv-(R6l!H1#Ubab>Xl#rg|WRKNe^NJ#^!i|i?F zGh-l>`@2|{W{k}*xVS?zkD^b*6#GX#4;fcEWe3B`yS)xvs3SUc}7}SzQ zd_C|!_V>?^2f0Z%Q9U|mK5&W&)xF$0d#$(>)m(aL>T)*#m9+@=t4XY)kN9#sLgsgl zd4KnNC4-{<_Ax%B$uItr_6evwhw%%f4}p!I*i$_=5co}?b+fVwL?ScOq}Ay_N_wX; zEu$J_Rikt?BcFks9pgQ+NC}W{%v?p!XhGp21NW+-B`CZYZ2De-`HU=7`X=YfKu&MT zpQlkDWUiEzX)e29{^3h{Y1Kv`+OYc*_f|3pc`PK!3cLiqi8I3uMkBz(#-Sae767&V=?+q1p_?%P|P@In8diMvLAMA-~RdYHGyhhgS6s!v%82726W(^<2ID-iP zO45O(3(4O%nI0fD&1Q$Ud;l^&OXqi7@AK21LJ9q%U-0zF-^29Cy=L;GgetkHjeiUa2SeoxPR7@wHuRGWY|q=$Df+t||gU z*+s=L>m#7$yOQ~@Y8ohTRKjmA>;v)e_e1CHJ|NJ`ma2ST1~}Wom&^SP2tm9?(yYHak-k!Rm z?in;}QT;7BqZW04X_M8jOh;`;x1O1BVf)CAQnV&4B~T+|hYO zBP<*$j`?ohc;C+#6$hR_$&}`LuYhyLG3f@64X{Qwf6dZm14iYJ1^=tqoL#l6AIqXA zP|4kvcq@t3R}EPzRWEM>0euLU4#N$=Wi;$@mf1iD1i0I6ACl2l{!O9hn%B|VFoj@Z z#Rs$;=+fsMZi(jbXnUVKk)p|T1=jhV4m7ry^Hai$5e+kV1S_2#L<1itjcD;tqu!=M z@zIzwsLyYJGV%Eh^vi=xYpA3TeIInQJe@U!>Y5j9@a&~gRSnL&lld&D(yndk{z3vO zKCRfESYD3GR&iO|6F#Cm>qK=vu4z=zgy&QFfEnAAnd z=I#|bQDO<;Bvd=?N7?}_uYMNKva|tXU3D1~H!jfGg;@(#Cjga=dj?gmIgs*2KZte0 z{Bmc8rv|Y+6!*MCp9{$W+Uw7b|2txUwzA0>H7{$Rb>2yV5LR5Ye6HI65#mI1wyf@& zm;Ry2GMjjU8)|6Ok;~pjGZ+1x%j0XM)kA;MTk)AX;!zJbiRTbMId;ENj#erPqc&@& z!>ANW)L|wtz4_i2+voN)k!73()m*l8q>R==<*w)VVuA-yNoVtB#m)vQa`tC?y-bL* zzsIWn?u$jmzK5OzCABF1h~{}rP!!4}jg@hB6GADsU;iqPAx8-VIGz(&Ui_%LQ0v0C z#be$dnvI`hr}-bmK^s56%fFKt1On>OiSZ+Rp#Dwl+*OeVjN!bQM-G31RXV$+>?Jl| z)3>O+ACipqPnhGWv2%%=K?l-DCxM5g!`PG~9Jn7f{~TV$;$~?W-JNa+Y@d53Zx+x1 z%e5;xCxcD`L-qO348LZ8)+QqL>jN90N)Kflpr8TLr+>OKvAz!>A#oP@Q8_j@%c!A1 zQ;!a~8|n-HsG}`c+iH63eaqGN&k$Q$J6g)Y*R9VqMRODk1r+pnXyONfQpUOr8r2NE z5k#(o{$8e|G@QjlUA}&26GLuGXQ-*bfc#6}9LS?n|z!$lK^c@$cLwK2DT-yToaIb_!*YEBSK1 zuSRKAnLho}Qv z4=FGiM~1cOVRM3cI~xc^+^jE(_jj)JOX~rzI}nMx)KI zkDHj8a?qLz*B`v=uGpO3a@R@dN7JJ3Txl}vLPb$*n<^&{0rzbXmW1@pbp4>b++0hbjuOABS-h*+a<&t6y3WGP_#@aNd4 zT#wL~5(P%pvty`1#%7<_svLdna2jMxzJc;7i8#Z&iqShW9Es3VkI|dd-*(9g`{;F8 zla?F5HA)b@@_zD~D9Zk9WfM7l3dMTHnHerU$KHSV8vKP?6np=aZ}(*7EfnJNxgPEC zVJ~~T%SAl4J?7o-1Eo7|{eSNeENhXODQW`R=QHPJ-<}0R4bm%`-$sBOGVx0vtOJ$R z|Fw6f|5$EOA16X6Zj_m1u1KYn;r`tj3L!&CWGGV-Aqi#75SfQ0q0AbDIGKk~5~o7u zP%=vh9f{|9UL5CNcwRiOuFrbA*8c2k|JGjn_nmq2b?zgDHB0uxtf4N2DEEYHiJ{s4LsW@Tk^T@(mkl4(9_HB@|g3a&@iqZ zeIVL`wegsfB$MO#t*=i@Be)$)NB43XElFd3x5xeMWOKp~F(k@>BMal+uJjaU!armu zmo~>+GYkwWuS@THgPx1x&WoXn_{64v_<~*_J_J2FA=1GDLR1f!JUfFo%@?$OUx`J1 z(@W80R%z5y^mLsgtWnz)5#8g*kFqNvI;ML5C~{LTnd~Dt12+1*)mA!CTd;n>eBiR39_zS+P~)Eotnf?=`SL9^VB!EsmE~1?Nj^Q4XuqjxH1Gv)IZ4Fh$km! 
z+WEG*VCiAB5&cS93f=jia!c}mDD*3b>rV|5b0amRl@T@) zg?|5J?o9I$3ccdpoi(@miFfTgwz=9XP-rzsi3bD(C{)eWUhU$-_|x^SEHot!zf5Zs z7TBL+!J)uz=9d=cl=Z@7x7%Urv1gLbZV>*8H{VJpCt_uHuW)LcKn1_2eRXjc z?)&|?Sii#wNqU9r_rCEVsv%>+KrbC(^(=39+k_$buAHLf+->-BNUNI8R3bxM^jPyy zD#4lecN#5wg6LUOCMU@jIESieI!2_yK9u>eqbeONNRewb#sB#UT%`6ow>9C#MjgIo zaM3=tFvpyy;hP5+iFy66%ygPb7qGTB_Monq57t|kxk`w6391K7=_C1U6sj)+!!--r zu{KdXSaGNnYvrm%_LoGkDps0CI_-!RF6$tnCttD1A$htkdIIy|V|VGKHeqH;aBKT) zE5^+EIP`}jG4h*v?F`>Kh9bFsL_EkwpInD{+ZPE`MZ_8Kr+cAA2Oaj%aH~ z^27r=L@}wq7p0a!Kukn*k{Tm?)86zkshPp+VsepA&~~`e|9v!_o8Z)KEi5aLpo2r9 zI?pGDy9n3n{_ECF7`{^#*%|M)R372CN@tA>bdMD`)$E zBk<{ifJmi8xbtkXC-Z-Vt4KEuAD0K5M6UMp+_?qo6AF^@7aWnAvL<9IK!1@s0-W=aGWp*R{9$WLh_QWh3r6_U0USmnOdS zjlGLYZovej!}dSca+uEOFtPbDfa&wMQZ3v=F)iqJwEOEIrmB{IY)#X^IC~pkVp|Qy zDk_dkd(va1fmFYzelNaq^kzv*uwy`~*79b0K6+RZW<7c+Xdg6gy7Zz7t=@07c}JSi z60SR1nb(ho^{5C+RuAf`3h&?jCkAEpq#ZdzTTogc&8NQ02L-It-}xVUB8RcjF>JdZ zGL4SiJ9$V68LggHe2?3ZCf>Mj!7>U-{5i4x=1-7h(xoG#Ux9G4+p=}gL4>{8l|Z$= z9}mY>{LT5P5Xbf2cqXhGH}!P97F8S(e4XvOFxxH^vIt&1pzs{-$C=`6T@T`VRcGt2 zolLOPW3tiM!VGIk7J;Kp?_l{szFBpJ9Y&0i^Nbg$asP@*w+|;3PTg@8G-2KY|3_UG zeigfMEPBsQzlA*@4?5HEloDL&Q2(%q*^Z68Ye!VGpS66u5r0$+)D;XFK z3}v-BRYh9ii@)NbKf*E?uH2qO&<`=p>BeJSDT$$UTALKY2eI!@R`gXbPxNJp%0Htj zMpuOGwWngr=**neQg%zmd#3$Mi#^k5*;!bTFk^v6*KaF)1yQJOIwH=+!H%j5QJ3R) zI#H_ndBv@co8a4!xO$~>kv~3kF`R+my@a&dF4kuu?aZk(hx4sS?3BoFx&D3s^_QBJmrh1Ty~T&KJE8>`DCp$d|8Adw z6h3vT82XT|p_AW*Z^DfV?adu&GSww$r)El=CiG?7?^!J#$sxC<|}o!ZEX_Ix=4SdZaX2nEYReO zb{;ZAcw&DB6Z|g*l~<9L$w;Pa2o)mycO%7rY}M7egP@x?)W_-x-Yv7N$u}QA1YbHO z7tiE|fMt;6UGi~Vu4vO5f9uiErtq=Q?je}A$qi~qIbaip67I%9x4V|ZUA)tn= zc%A_AnQtv$P!gztOXu6~T}8$OZ*wR+#;*j%wxKLBvB4R><+}SEvatLBa{=>6oxJNZ+saF67t*cf0)VS2U{NU7*%&7BK~% zSjQA|)HyKBk|HdFsW;aD;o}Qgx#@lzacu>+#QW!xXxb_nW6fcOW*7a&D4wl&+Yz(J zw{H!vhx>PK`AYciH&W-M+N_|VAW9{B^bTtG`qfc8_@mZ7LzBM#3CdMg%yr5}@zS|& zHILRD&%ebl>vUA$v1-N(3Fb;k=P)sbk%L(k4Vgal)n z_o=5C5hnKMF(>CygrDx_+Lx;b?{u{c1ukW{i4Pg{%|3@~LHQ*2|8heZeeIM=D&Ve0 zHF7rE1GX74ks8RDsurg~4b#3(o5kCS4W%pgYmu7gOF1hMS*K2j^|c z3o?i7*Ao7_pO*BU4$-yB*r-E^pfu-tX&uU6UA!khE{ZCq%_NlxUsTNrZ!Wa@3uOtM zb>}mQ^F*=q=P|-azB6by3Km#vEQG!Byj)R;=kPl6K3u%NqW>JZ1|dZkNd!Ml zdi`42u4vqojkb9h%Zd0HCA%sU5^h_Y?#veLM1f&NC+V*ss7>#&o_!Cl zJ3N2`$B8wrYyu zG%vqV_D=;^`)t?Td-NnIb2g!mw=P5N&t95Lb2>yn`df-#a~iHgDVn-|#gK2cdY0{$ z4Ku+U-d^GJ$Y^iX`=%a&!^)e;iR4$%aNbWjcfkmfXU|kD@y$SlXAf2Bs}P8o2jj%sAnRnYw$*%J0$6LuW6)+yN~vkat^etVJcAt`3DX2O^_S_3Jl!=EsITmKWTlOQ%&WT%+6ex@=j63f zZZ>3F=Kkbu96%OF=)xBIWjuH~WLfw*9_fY?4W3aHBpr3>W?i(x-2+9b!qa;Yk$RHe zy!1JO)~t)(UncesFP^e$lihGXI%(Ye9xPp;o??8HGU0Ro-#v=y>>7PIGFhGcNEMNPV9^4ehA8N zI3ty5C5(mj-6u*|AnC~OxS2Z&2RLptf1oZwxbpHTzKjThBfR`llGYvKJud z*KMP7QY}B>oLG{6xw0OXPp<5xi}?(@JhqXzoXxPhF|FpfqXIV1qMW@TRco>O>78s6nK3k>Idf1)gzw{c!{wio!i?Yidvx{-?!o zU={bHm>r968-rCUq*RRHOYL2~ex|Lj611w>#gRYr$iEkIyHr_Tf;zEpSY)xDJZZW4 zYIgq};{Wu;ab>!f++DLsmRfLuA$OQ+eLNqztlZIlJzk+)nULR5~b1?GD!$0XJwBli_;X=B5mx2W=9< z3<{sO!SwC%CEyrLk{qd+uG&I9^kj$FK~5MH4hA#vI6y~kd9Q1M9ZpY(39ap+1x5e# zKNTMO(6y9hkWqdDu`g_Fvkg<=jwuW9muP}e^^_b}n<@?rZFyY(?f{gNQ-q9}yTP8^ zI!|qG0)@mvyKC`^K-}|s)KaF-6U0-FdvCsyU+;^TU_vdi< z)_LA330fp>D^jCU@Ff4bSJ9%vWedKupMS-UlE^Y+rA6bZ~WaE^=>gbN~PW0002`000F!00962Q+{_flmJVE B5qAIp diff --git a/plotUtils/simps/simp_signal_2016.py b/plotUtils/simps/simp_signal_2016.py index f46411a2f..896aceddc 100644 --- a/plotUtils/simps/simp_signal_2016.py +++ b/plotUtils/simps/simp_signal_2016.py @@ -30,6 +30,7 @@ def __init__(self, mpifpi=4*np.pi, nsigma=1.5): self.mass_resolution = self.polynomial(.75739851, 0.031621002, 5.2949672e-05) self.radiative_fraction = self.polynomial(0.10541434, -0.0011737697, 
7.4487930e-06, -1.6766332e-08) self.radiative_acceptance = self.polynomial(-0.48922505, 0.073733061, -0.0043873158, 0.00013455495, -2.3630535e-06, 2.5402516e-08, -1.7090900e-10, 7.0355585e-13, -1.6215982e-15, 1.6032317e-18) + self.psum_reweighting = self.polynomial(0.094272950, 0.87334446, -0.19641796) #GeV argument self.cr_psum_low = 1.9 self.cr_psum_high = 2.4 self.sr_psum_low = 1.0 @@ -84,6 +85,9 @@ def load_signal(self, filepath, pre_readout_filepath, mass_vd, selection, cut_ex ) events['vd_true_gamma'] = events.vd_true_vtx_energy * 1000 / mass_vd events['unc_vtx_min_z0'] = np.minimum(abs(events.unc_vtx_ele_track_z0), abs(events.unc_vtx_pos_track_z0)) + events['psum_reweight'] = self.psum_reweighting(events.unc_vtx_psum) + events['psum_reweight'] = ak.where(events['psum_reweight'] > 1., 1., events['psum_reweight']) + not_rebinned_pre_readout_z_h = self.load_pre_readout_signal_z_distribution(pre_readout_filepath) @@ -174,7 +178,7 @@ def get_exp_sig_eps2(self, signal_mass, signal_array, eps2): ) # the weight for a single event is the chance of that decay (z and gamma from either Vd) # multiplied by probability the event was from that z-bin in the original sample - signal_array['reweighted_accxEff'] = combined_decay_weight*signal_array.event_weight_by_uniform_z + signal_array['reweighted_accxEff'] = combined_decay_weight*signal_array.event_weight_by_uniform_z*signal_array.psum_reweight return signal_array From 8ddb8042ff315c1f6cc4f0adc83ebd011b193f40 Mon Sep 17 00:00:00 2001 From: Alic Shen Spellman Date: Sun, 11 Aug 2024 09:38:01 -0700 Subject: [PATCH 19/27] unknown save state --- plotUtils/simps/run_opt_interval.py | 3 +- plotUtils/simps/simp_signal_2016.py | 9 ++- plotUtils/simps/simp_theory_equations.py | 89 +++++++++++++++--------- 3 files changed, 67 insertions(+), 34 deletions(-) diff --git a/plotUtils/simps/run_opt_interval.py b/plotUtils/simps/run_opt_interval.py index 53494e610..a8b84ec6d 100644 --- a/plotUtils/simps/run_opt_interval.py +++ b/plotUtils/simps/run_opt_interval.py @@ -240,7 +240,8 @@ def kLargestIntervals(list_of_energies, spectrumCDF = lambda x: x): print(data_z) #Load MC Signal - indir = '/sdf/group/hps/user-data/alspellm/2016/simp_mc/pass4b/beam/smeared' + #indir = '/sdf/group/hps/user-data/alspellm/2016/simp_mc/pass4b/beam/smeared' + indir = '/sdf/group/hps/user-data/alspellm/2016/simp_mc/pass4b/beam/smeared_fixbeamspot' signal_pre_readout_path = lambda mass: f'/sdf/group/hps/user-data/alspellm/2016/simp_mc/pass4b/nobeam/mass_{mass}_simp_2pt3_slic_hadd_ana.root' signal_path = lambda mass: f'{indir}/mass_{mass}_hadd-simp-beam_ana_smeared_corr.root' signal_selection = 'vtxana_radMatchTight_2016_simp_SR_analysis' diff --git a/plotUtils/simps/simp_signal_2016.py b/plotUtils/simps/simp_signal_2016.py index 896aceddc..330745c88 100644 --- a/plotUtils/simps/simp_signal_2016.py +++ b/plotUtils/simps/simp_signal_2016.py @@ -31,6 +31,7 @@ def __init__(self, mpifpi=4*np.pi, nsigma=1.5): self.radiative_fraction = self.polynomial(0.10541434, -0.0011737697, 7.4487930e-06, -1.6766332e-08) self.radiative_acceptance = self.polynomial(-0.48922505, 0.073733061, -0.0043873158, 0.00013455495, -2.3630535e-06, 2.5402516e-08, -1.7090900e-10, 7.0355585e-13, -1.6215982e-15, 1.6032317e-18) self.psum_reweighting = self.polynomial(0.094272950, 0.87334446, -0.19641796) #GeV argument + self.minz0_cut_poly = self.polynomial(1.07620094e+00 + 0.1, -7.44533811e-03, 1.58745903e-05) self.cr_psum_low = 1.9 self.cr_psum_high = 2.4 self.sr_psum_low = 1.0 @@ -54,7 +55,10 @@ def load_data(self, 
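A note on the change above: it folds a Psum-dependent correction into the per-event signal weight by evaluating a quadratic polynomial at the pair's Psum (in GeV), capping the result at 1 so no event is weighted up, and multiplying it into reweighted_accxEff. Below is a minimal standalone sketch of that pattern using plain numpy instead of awkward arrays; the array names are illustrative and the ascending-order form of self.polynomial(...) is assumed rather than taken from the repository.

import numpy as np

# Quadratic weight w(psum) = c0 + c1*psum + c2*psum**2, with the coefficients quoted in the diff above.
c0, c1, c2 = 0.094272950, 0.87334446, -0.19641796

def psum_weight(psum_gev):
    w = c0 + c1 * psum_gev + c2 * psum_gev**2
    # Cap the correction at 1, mirroring the ak.where(...) clamp in the diff.
    return np.minimum(w, 1.0)

psum = np.array([1.2, 1.8, 2.3])       # illustrative Psum values [GeV]
acc_x_eff = np.array([0.8, 0.9, 0.7])  # illustrative per-event weights
reweighted = acc_x_eff * psum_weight(psum)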
From 8ddb8042ff315c1f6cc4f0adc83ebd011b193f40 Mon Sep 17 00:00:00 2001
From: Alic Shen Spellman
Date: Sun, 11 Aug 2024 09:38:01 -0700
Subject: [PATCH 19/27] unknown save state

---
 plotUtils/simps/run_opt_interval.py      |  3 +-
 plotUtils/simps/simp_signal_2016.py      |  9 ++-
 plotUtils/simps/simp_theory_equations.py | 89 +++++++++++++++---------
 3 files changed, 67 insertions(+), 34 deletions(-)

diff --git a/plotUtils/simps/run_opt_interval.py b/plotUtils/simps/run_opt_interval.py
index 53494e610..a8b84ec6d 100644
--- a/plotUtils/simps/run_opt_interval.py
+++ b/plotUtils/simps/run_opt_interval.py
@@ -240,7 +240,8 @@ def kLargestIntervals(list_of_energies, spectrumCDF = lambda x: x):
     print(data_z)
 
     #Load MC Signal
-    indir = '/sdf/group/hps/user-data/alspellm/2016/simp_mc/pass4b/beam/smeared'
+    #indir = '/sdf/group/hps/user-data/alspellm/2016/simp_mc/pass4b/beam/smeared'
+    indir = '/sdf/group/hps/user-data/alspellm/2016/simp_mc/pass4b/beam/smeared_fixbeamspot'
     signal_pre_readout_path = lambda mass: f'/sdf/group/hps/user-data/alspellm/2016/simp_mc/pass4b/nobeam/mass_{mass}_simp_2pt3_slic_hadd_ana.root'
     signal_path = lambda mass: f'{indir}/mass_{mass}_hadd-simp-beam_ana_smeared_corr.root'
     signal_selection = 'vtxana_radMatchTight_2016_simp_SR_analysis'
diff --git a/plotUtils/simps/simp_signal_2016.py b/plotUtils/simps/simp_signal_2016.py
index 896aceddc..330745c88 100644
--- a/plotUtils/simps/simp_signal_2016.py
+++ b/plotUtils/simps/simp_signal_2016.py
@@ -31,6 +31,7 @@ def __init__(self, mpifpi=4*np.pi, nsigma=1.5):
         self.radiative_fraction = self.polynomial(0.10541434, -0.0011737697, 7.4487930e-06, -1.6766332e-08)
         self.radiative_acceptance = self.polynomial(-0.48922505, 0.073733061, -0.0043873158, 0.00013455495, -2.3630535e-06, 2.5402516e-08, -1.7090900e-10, 7.0355585e-13, -1.6215982e-15, 1.6032317e-18)
         self.psum_reweighting = self.polynomial(0.094272950, 0.87334446, -0.19641796) #GeV argument
+        self.minz0_cut_poly = self.polynomial(1.07620094e+00 + 0.1, -7.44533811e-03, 1.58745903e-05)
         self.cr_psum_low = 1.9
         self.cr_psum_high = 2.4
         self.sr_psum_low = 1.0
@@ -54,7 +55,10 @@ def load_data(self, filepath, selection, cut_expression=None, expressions=None):
             cut=cut_expression, expressions=expressions
         )
-        events['unc_vtx_min_z0'] = np.minimum(abs(events.unc_vtx_ele_track_z0), abs(events.unc_vtx_pos_track_z0))
+        try:
+            events['unc_vtx_min_z0'] = np.minimum(abs(events.unc_vtx_ele_track_z0), abs(events.unc_vtx_pos_track_z0))
+        except:
+            pass
         return events
 
     @staticmethod
@@ -357,7 +361,8 @@ def cnvHistoToROOT(histo, tempname='temporary_uproot'):
     for signal_mass in masses:
 
         #Load MC Signal
-        indir = '/sdf/group/hps/user-data/alspellm/2016/simp_mc/pass4b/beam/smeared'
+        #indir = '/sdf/group/hps/user-data/alspellm/2016/simp_mc/pass4b/beam/smeared'
+        indir = '/sdf/group/hps/user-data/alspellm/2016/simp_mc/pass4b/beam/smeared_fixbeamspot'
         signal_pre_readout_path = lambda mass: f'/sdf/group/hps/user-data/alspellm/2016/simp_mc/pass4b/nobeam/mass_{mass}_simp_2pt3_slic_hadd_ana.root'
         signal_path = lambda mass: f'{indir}/mass_{mass}_hadd-simp-beam_ana_smeared_corr.root'
         signal_selection = 'vtxana_radMatchTight_2016_simp_SR_analysis'
diff --git a/plotUtils/simps/simp_theory_equations.py b/plotUtils/simps/simp_theory_equations.py
index 85057ece6..d6cf16a56 100644
--- a/plotUtils/simps/simp_theory_equations.py
+++ b/plotUtils/simps/simp_theory_equations.py
@@ -1,9 +1,10 @@
 import math
 import ROOT as r
+import numpy as np
 
 class SimpEquations:
     def __init__(self, year = 2016, alpha_dark = 0.01, mass_ratio_Ap_to_Vd = 1.66, mass_ratio_Ap_to_Pid = 3.0,
-        ratio_mPi_to_fPi = 12.566, lepton_mass = 0.511):
+                 ratio_mPi_to_fPi = 12.566, lepton_mass = 0.511):
         self.year = year
         self.alpha_dark = alpha_dark
         self.mass_ratio_Ap_to_Vd = mass_ratio_Ap_to_Vd
@@ -23,8 +24,8 @@ def rate_Ap_ee(m_Ap, eps):
     @staticmethod
     def rate_2pi(m_Ap, m_pi, m_V, alpha_dark):
         coeff = (2.0 * alpha_dark / 3.0) * m_Ap
-        pow1 = math.pow((1 - (4 * m_pi * m_pi / (m_Ap * m_Ap))), 3 / 2.0)
-        pow2 = math.pow(((m_V * m_V) / ((m_Ap * m_Ap) - (m_V * m_V))), 2)
+        pow1 = np.power((1 - (4 * m_pi * m_pi / (m_Ap * m_Ap))), 3 / 2.0)
+        pow2 = np.power(((m_V * m_V) / ((m_Ap * m_Ap) - (m_V * m_V))), 2)
         return coeff * pow1 * pow2
 
     @staticmethod
@@ -32,59 +33,85 @@ def rate_Vrho_pi(m_Ap, m_pi, m_V, alpha_dark, f_pi):
         x = m_pi / m_Ap
         y = m_V / m_Ap
         Tv = 3.0/4.0
-        coeff = alpha_dark * Tv / (192.0 * math.pow(math.pi, 4))
-        return coeff * math.pow((m_Ap / m_pi), 2) * math.pow(m_V / m_pi, 2) * math.pow((m_pi / f_pi), 4) * m_Ap * math.pow(SimpEquations.Beta(x, y), 3 / 2.0)
+        coeff = alpha_dark * Tv / (192.0 * np.power(math.pi, 4))
+        return coeff * np.power((m_Ap / m_pi), 2) * np.power(m_V / m_pi, 2) * np.power((m_pi / f_pi), 4) * m_Ap * np.power(SimpEquations.Beta(x, y), 3 / 2.0)
 
     def rate_Vphi_pi(m_Ap, m_pi, m_V, alpha_dark, f_pi):
         x = m_pi / m_Ap
         y = m_V / m_Ap
         Tv = 3.0/2.0
-        coeff = alpha_dark * Tv / (192.0 * math.pow(math.pi, 4))
-        return coeff * math.pow((m_Ap / m_pi), 2) * math.pow(m_V / m_pi, 2) * math.pow((m_pi / f_pi), 4) * m_Ap * math.pow(SimpEquations.Beta(x, y), 3 / 2.0)
+        coeff = alpha_dark * Tv / (192.0 * np.power(math.pi, 4))
+        return coeff * np.power((m_Ap / m_pi), 2) * np.power(m_V / m_pi, 2) * np.power((m_pi / f_pi), 4) * m_Ap * np.power(SimpEquations.Beta(x, y), 3 / 2.0)
 
     def rate_Vcharged_pi(m_Ap, m_pi, m_V, alpha_dark, f_pi):
         x = m_pi / m_Ap
         y = m_V / m_Ap
         Tv = 18.0 - ((3.0/2.0)+(3.0/4.0))
-        coeff = alpha_dark * Tv / (192.0 * math.pow(math.pi, 4))
-        return coeff * math.pow((m_Ap / m_pi), 2) * math.pow(m_V / m_pi, 2) * math.pow((m_pi / f_pi), 4) * m_Ap * math.pow(SimpEquations.Beta(x, y), 3 / 2.0)
+        coeff = alpha_dark * Tv / (192.0 * np.power(math.pi, 4))
+        return coeff * np.power((m_Ap / m_pi), 2) * np.power(m_V / m_pi, 2) * np.power((m_pi / f_pi), 4) * m_Ap * np.power(SimpEquations.Beta(x, y), 3 / 2.0)
 
     @staticmethod
     def br_2pi(m_Ap, m_pi, m_V, alpha_dark, f_pi):
-        total_rate = SimpEquations.rate_Vrho_pi(m_Ap, m_pi, m_V, alpha_dark, f_pi) +SimpEquations.rate_Vphi_pi(m_Ap, m_pi, m_V, alpha_dark, f_pi) + SimpEquations.rate_Vcharged_pi(m_Ap, m_pi, m_V, alpha_dark, f_pi) + SimpEquations.rate_2pi(m_Ap, m_pi, m_V, alpha_dark)
-        if m_Ap > 2.0*m_V:
-            total_rate = total_rate + SimpEquations.rate_2V(m_Ap, m_V, alpha_dark)
-        return SimpEquations.rate_2pi(m_Ap, m_pi, m_V, alpha_dark)/total_rate
+        total_rate = (SimpEquations.rate_Vrho_pi(m_Ap, m_pi, m_V, alpha_dark, f_pi) +
+                      SimpEquations.rate_Vphi_pi(m_Ap, m_pi, m_V, alpha_dark, f_pi) +
+                      SimpEquations.rate_Vcharged_pi(m_Ap, m_pi, m_V, alpha_dark, f_pi) +
+                      SimpEquations.rate_2pi(m_Ap, m_pi, m_V, alpha_dark))
+
+        # Add the rate_2V contribution element-wise where m_Ap > 2.0 * m_V
+        total_rate += np.where(m_Ap > 2.0 * m_V, SimpEquations.rate_2V(m_Ap, m_V, alpha_dark), 0.0)
+
+        return SimpEquations.rate_2pi(m_Ap, m_pi, m_V, alpha_dark) / total_rate
 
     @staticmethod
     def br_Vrho_pi(m_Ap, m_pi, m_V, alpha_dark, f_pi):
-        total_rate = SimpEquations.rate_Vrho_pi(m_Ap, m_pi, m_V, alpha_dark, f_pi) +SimpEquations.rate_Vphi_pi(m_Ap, m_pi, m_V, alpha_dark, f_pi) + SimpEquations.rate_Vcharged_pi(m_Ap, m_pi, m_V, alpha_dark, f_pi) + SimpEquations.rate_2pi(m_Ap, m_pi, m_V, alpha_dark)
-        if m_Ap > 2.0*m_V:
-            total_rate = total_rate + SimpEquations.rate_2V(m_Ap, m_V, alpha_dark)
+        total_rate = (SimpEquations.rate_Vrho_pi(m_Ap, m_pi, m_V, alpha_dark, f_pi) +
+                      SimpEquations.rate_Vphi_pi(m_Ap, m_pi, m_V, alpha_dark, f_pi) +
+                      SimpEquations.rate_Vcharged_pi(m_Ap, m_pi, m_V, alpha_dark, f_pi) +
+                      SimpEquations.rate_2pi(m_Ap, m_pi, m_V, alpha_dark))
+
+        # Add the rate_2V contribution element-wise where m_Ap > 2.0 * m_V
+        total_rate += np.where(m_Ap > 2.0 * m_V, SimpEquations.rate_2V(m_Ap, m_V, alpha_dark), 0.0)
+
         return SimpEquations.rate_Vrho_pi(m_Ap, m_pi, m_V, alpha_dark, f_pi) / total_rate
 
     @staticmethod
     def br_Vphi_pi(m_Ap, m_pi, m_V, alpha_dark, f_pi):
-        total_rate = SimpEquations.rate_Vrho_pi(m_Ap, m_pi, m_V, alpha_dark, f_pi) +SimpEquations.rate_Vphi_pi(m_Ap, m_pi, m_V, alpha_dark, f_pi) + SimpEquations.rate_Vcharged_pi(m_Ap, m_pi, m_V, alpha_dark, f_pi) + SimpEquations.rate_2pi(m_Ap, m_pi, m_V, alpha_dark)
-        if m_Ap > 2.0*m_V:
-            total_rate = total_rate + SimpEquations.rate_2V(m_Ap, m_V, alpha_dark)
+        total_rate = (SimpEquations.rate_Vrho_pi(m_Ap, m_pi, m_V, alpha_dark, f_pi) +
+                      SimpEquations.rate_Vphi_pi(m_Ap, m_pi, m_V, alpha_dark, f_pi) +
+                      SimpEquations.rate_Vcharged_pi(m_Ap, m_pi, m_V, alpha_dark, f_pi) +
+                      SimpEquations.rate_2pi(m_Ap, m_pi, m_V, alpha_dark))
+
+        # Add the rate_2V contribution element-wise where m_Ap > 2.0 * m_V
+        total_rate += np.where(m_Ap > 2.0 * m_V, SimpEquations.rate_2V(m_Ap, m_V, alpha_dark), 0.0)
+
         return SimpEquations.rate_Vphi_pi(m_Ap, m_pi, m_V, alpha_dark, f_pi) / total_rate
 
     @staticmethod
     def br_Vcharged_pi(m_Ap, m_pi, m_V, alpha_dark, f_pi):
-        total_rate = SimpEquations.rate_Vrho_pi(m_Ap, m_pi, m_V, alpha_dark, f_pi) +SimpEquations.rate_Vphi_pi(m_Ap, m_pi, m_V, alpha_dark, f_pi) + SimpEquations.rate_Vcharged_pi(m_Ap, m_pi, m_V, alpha_dark, f_pi) + SimpEquations.rate_2pi(m_Ap, m_pi, m_V, alpha_dark)
-        if m_Ap > 2.0*m_V:
-            total_rate = total_rate + SimpEquations.rate_2V(m_Ap, m_V, alpha_dark)
+        total_rate = (SimpEquations.rate_Vrho_pi(m_Ap, m_pi, m_V, alpha_dark, f_pi) +
+                      SimpEquations.rate_Vphi_pi(m_Ap, m_pi, m_V, alpha_dark, f_pi) +
+                      SimpEquations.rate_Vcharged_pi(m_Ap, m_pi, m_V, alpha_dark, f_pi) +
+                      SimpEquations.rate_2pi(m_Ap, m_pi, m_V, alpha_dark))
+
+        # Add the rate_2V contribution element-wise where m_Ap > 2.0 * m_V
+        total_rate += np.where(m_Ap > 2.0 * m_V, SimpEquations.rate_2V(m_Ap, m_V, alpha_dark), 0.0)
+
         return SimpEquations.rate_Vcharged_pi(m_Ap, m_pi, m_V, alpha_dark, f_pi) / total_rate
 
     @staticmethod
     def br_2V(m_Ap, m_pi, m_V, alpha_dark, f_pi):
-        total_rate = SimpEquations.rate_Vrho_pi(m_Ap, m_pi, m_V, alpha_dark, f_pi) +SimpEquations.rate_Vphi_pi(m_Ap, m_pi, m_V, alpha_dark, f_pi) + SimpEquations.rate_Vcharged_pi(m_Ap, m_pi, m_V, alpha_dark, f_pi) + SimpEquations.rate_2pi(m_Ap, m_pi, m_V, alpha_dark)
-        if m_Ap > 2.0*m_V:
-            total_rate = total_rate + SimpEquations.rate_2V(m_Ap, m_V, alpha_dark)
-        if 2 * m_V >= m_Ap:
-            return 0.0
-        return SimpEquations.rate_2V(m_Ap, m_V, alpha_dark) / total_rate
+        total_rate = (SimpEquations.rate_Vrho_pi(m_Ap, m_pi, m_V, alpha_dark, f_pi) +
+                      SimpEquations.rate_Vphi_pi(m_Ap, m_pi, m_V, alpha_dark, f_pi) +
+                      SimpEquations.rate_Vcharged_pi(m_Ap, m_pi, m_V, alpha_dark, f_pi) +
+                      SimpEquations.rate_2pi(m_Ap, m_pi, m_V, alpha_dark))
+
+        # Calculate the rate_2V contribution only where m_Ap > 2.0 * m_V and add it to total_rate
+        total_rate += np.where(m_Ap > 2.0 * m_V, SimpEquations.rate_2V(m_Ap, m_V, alpha_dark), 0.0)
+
+        # Return 0.0 where 2 * m_V >= m_Ap
+        rate_2V_contrib = np.where((2.0 * m_V >= m_Ap) | (m_Ap <= 2.9 * m_V), 0.0, SimpEquations.rate_2V(m_Ap, m_V, alpha_dark))
+
+        return rate_2V_contrib / total_rate
 
     @staticmethod
     def Tv(rho, phi):
@@ -97,7 +124,7 @@ def Tv(rho, phi):
 
     @staticmethod
     def Beta(x, y):
-        return (1 + math.pow(y, 2) - math.pow(x, 2) - 2 * y) * (1 + math.pow(y, 2) - math.pow(x, 2) + 2 * y)
+        return (1 + np.power(y, 2) - np.power(x, 2) - 2 * y) * (1 + np.power(y, 2) - np.power(x, 2) + 2 * y)
 
     @staticmethod
     def rate_2V(m_Ap, m_V, alpha_dark):
@@ -108,7 +135,7 @@ def f(r):
         # Define your function f(r) here
         # Example: return some_expression
-        pass
+        return -1.
 
     @staticmethod
     def rate_2l(m_Ap, m_pi, m_V, eps, alpha_dark, f_pi, m_l, rho):
@@ -142,4 +169,4 @@ def totalApProductionRate(m_Ap, eps, radFrac, radAcc, dNdm):
 
         return apProduction
 
-    
+
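The rewrite in the patch above repeats one pattern in every branching-ratio function: scalar math.pow calls become np.power, and the scalar if-branch that conditionally adds the 2V rate becomes an element-wise np.where term, so the same functions also accept numpy arrays of masses. Below is a minimal sketch of that transformation under that reading; the function and variable names are illustrative only, not the repository's API.

import numpy as np

def add_2v_if_open_scalar(total_rate, rate_2v, m_Ap, m_V):
    # Original scalar form: add the extra channel only when it is kinematically open.
    if m_Ap > 2.0 * m_V:
        total_rate = total_rate + rate_2v
    return total_rate

def add_2v_if_open_vectorized(total_rate, rate_2v, m_Ap, m_V):
    # Vectorized equivalent: np.where applies the condition element-wise,
    # so m_Ap and m_V may be arrays instead of single floats.
    return total_rate + np.where(m_Ap > 2.0 * m_V, rate_2v, 0.0)

m_Ap = np.array([60.0, 120.0])   # illustrative masses
m_V = np.array([40.0, 50.0])
print(add_2v_if_open_vectorized(1.0, 0.5, m_Ap, m_V))   # -> [1.  1.5]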
From a679814a250a111f94d17e63274d92 Mon Sep 17 00:00:00 2001
From: Alic Shen Spellman
Date: Wed, 4 Sep 2024 15:07:49 -0700
Subject: [PATCH 20/27] cleaning

---
 plotUtils/simps/plot_final_results.py | 209 ++++++++++++++++++++++++++
 plotUtils/simps/run_opt_interval.py   |  14 +-
 plotUtils/simps/run_signal_search.py  |   2 +-
 plotUtils/simps/simp_signal_2016.py   |  82 ++++++++--
 4 files changed, 285 insertions(+), 22 deletions(-)
 create mode 100644 plotUtils/simps/plot_final_results.py

diff --git a/plotUtils/simps/plot_final_results.py b/plotUtils/simps/plot_final_results.py
new file mode 100644
index 000000000..f30193ab3
--- /dev/null
+++ b/plotUtils/simps/plot_final_results.py
@@ -0,0 +1,209 @@
+#!/usr/bin/env python
+import os
+import awkward as ak
+import numpy as np
+import hist
+from hist import Hist
+import uproot
+import ROOT as r
+import copy
+import matplotlib.pyplot as plt
+import matplotlib as mpl
+import matplotlib.gridspec as gridspec
+import sys
+import math
+
+#Import simp class where systematics info is stored
+import simp_signal_2016
+
+#format mpl plots. Should make a template...
+plt.rcParams.update({'font.size': 60,           # Font size for text
+                     'axes.titlesize': 60,      # Font size for titles
+                     'axes.labelsize': 60,      # Font size for axis labels
+                     'xtick.labelsize': 60,     # Font size for x-axis tick labels
+                     'ytick.labelsize': 60,     # Font size for y-axis tick labels
+                     'lines.linewidth': 5.0,
+                     'legend.fontsize': 60})    # Font size for legend
+plt.rcParams['font.family'] = 'DejaVu Sans'
+
+import argparse
+parser = argparse.ArgumentParser(description='')
+parser.add_argument('--infile_signal_search', type=str, default='.simp_2016_results/unblinded_result_100pct.root')
+parser.add_argument('--infile_signal_opt_interval', type=str, default='.simp_2016_results/unblinded_result_100pct.root')
+parser.add_argument('--outdir', type=str, default='./search_results')
+parser.add_argument('--mpifpi', type=float, default=4.*np.pi)
+
+args = parser.parse_args()
+########################################################################################################################################
+
+#Input file should be output of run_signal_search.py
+infile_signal_search = args.infile_signal_search
+#Grab search results (exp bkg, obs, local pvalues)
+with uproot.open(infile_signal_search) as f:
+    expected_bkg = f['expected_background'].values()
+    expected_bkg_errlow = f['expected_background'].errors('low')[1]
+    expected_bkg_errhigh = f['expected_background'].errors('high')[1]
+    Nobs = f['Nobs'].values()
+    local_pvalue = f['local_pvalue'].values()
+    local_pvalue_errlow = f['local_pvalue'].errors('low')[1]
+    local_pvalue_errhigh = f['local_pvalue'].errors('high')[1]
+
+#Calculate the "Look-Elsewhere Effect" correction. Total search window mass range divided by avg mass resolution
+search_window = 1.5 #used in final analysis
+signalProcessor = simp_signal_2016.SignalProcessor(args.mpifpi, search_window)
+masses = expected_bkg[0]
+masses = masses[np.where(masses <= 124.0)[0]] #I restricted search for signal to 124 MeV
+avg_resolution = np.average( np.array([signalProcessor.mass_resolution(x) for x in masses]) )
+look_elsewhere = (masses[-1] - masses[0])/(avg_resolution)
+print(f'Average mass resolution: {avg_resolution}')
+print(f'Look elsewhere effect: {look_elsewhere}')
+thresholds = []
+thresholds_lew = [] #thresholds corrected for look elsewhere
+from scipy.stats import norm
+for nsigma in [1,2,3,4,5]:
+    gaus_cdf = norm.cdf(nsigma)
+    threshold = (1.0 - gaus_cdf)/look_elsewhere
+    thresholds_lew.append(threshold)
+    thresholds.append((1.0 - gaus_cdf))
+
+#top plot: expected background and observed events for all search windows
+fig, ax = plt.subplots(2, 1, figsize=(60,60), gridspec_kw={'height_ratios': [3, 2]})
+gs = gridspec.GridSpec(2, 1, height_ratios=[10, 1])
+plt.subplot(2,1,1)
+plt.errorbar(expected_bkg[0], expected_bkg[1], yerr=(expected_bkg_errlow, expected_bkg_errhigh), marker='o', markersize=20, color='blue', label='Estimated Background')
+plt.plot(Nobs[0], Nobs[1], marker='o', color='black', markersize=20, label='Observed Events')
+plt.legend()
+plt.xlabel('$V_{D}$ Invariant Mass Search Window [MeV]')
+plt.ylabel('Number Of Events')
+plt.xlim(29.5,124.0)
+
+#bottom plot: local p-values and significance thresholds
+plt.subplot(2,1,2)
+plt.errorbar(local_pvalue[0], local_pvalue[1], yerr=(local_pvalue_errlow, local_pvalue_errhigh), marker='o', markersize=20, color='black', label='Local p-value')
+from scipy.stats import norm
+for n,nsigma in enumerate(thresholds):
+    if n < len(thresholds)-1:
+        plt.axhline(y=nsigma, linestyle='--', linewidth=4.0, color='black')
+        plt.axhline(y=thresholds_lew[n], linestyle='--', linewidth=4.0, color='red')
+    else:
+        plt.axhline(y=nsigma, linestyle='--', linewidth=4.0, color='black', label='Local $N\sigma$')
+        plt.axhline(y=thresholds_lew[n], linestyle='--', linewidth=4.0, color='red', label='Global LEE $N\sigma$')
+plt.xlabel('$V_{D}$ Invariant Mass Search Window [MeV]')
+plt.ylabel('p-value')
+plt.legend()
+
+#Identify search window with largest p-value fluctuation (smallest p-value)
+pvalmin = np.min(local_pvalue[1])
+pvalmin_mass = local_pvalue[0][np.argmin(local_pvalue[1])]
+nsigma_local = np.round(norm.ppf(1 - pvalmin / 2),1)
+nsigma_global = np.round(norm.ppf(1 - (pvalmin*look_elsewhere) / 2),1)
+t = plt.text(80.0, .9e-4, f'Smallest p-value: {pvalmin} at {pvalmin_mass} MeV\nLocal Significance: {nsigma_local}$\sigma$\nGlobal Significance: {nsigma_global}$\sigma$')
+t.set_bbox(dict(facecolor='white', alpha=1.0, edgecolor='black'))
+plt.yscale('log')
+plt.ylim(1e-6,1.1)
+plt.xlim(29.5,124.0)
+plt.savefig(f'{outdir}/signal_search_results.png')
+
+##########################################################################################################################
+#Calculate exclusion contour with systematic uncertainties included
+infile_signal_opt_interval = args.infile_signal_opt_interval
+#no systematics included in these results. They are applied in this script by reducing the expected signal rate.
+#Published result may prefer to generate more MC signal masses, re-calculate OIM, use ratio (OIM w sys)/(OIM w/o sys)...
+with uproot.open(infile_signal_opt_interval) as f:
+    exclusion_h = f['sensitivity_ap_h'].to_hist()
+    expected_signal_h = f['total_yield_ap_h'].to_hist()
+    excluded_signal_h = f['excluded_signal_ap_h'].to_hist()
+
+sysProc = simp_signal_2016.SignalProcessor(args.mpifpi, search_window)
+sysProc.systematic_uncertainties()
+masses = expected_signal_h.axes[0].centers
+sys_uncertainties = []
+for mass in masses:
+    #systematics are accessed through SignalProcessor
+    rad_targ_nom, rad_targ_mpt5, rad_targ_ppt5, simp_targ, mass_unc, radfrac_unc = sysProc.evaluate_polynomials(mass)
+    #for rad targ, take ratio of off-nominal to nominal. Max value is used.
+    rad_targ = np.maximum(rad_targ_mpt5/rad_targ_nom, rad_targ_ppt5/rad_targ_nom) - 1.0
+    if rad_targ < 0.0:
+        rad_targ = 0.0
+    #for simp targ, if ratio is greater than 1, set to 1
+    simp_targ = 1.0-simp_targ
+    if simp_targ < 0.0:
+        simp_targ = 0.0
+    sys_unc = np.sqrt( rad_targ**2 + simp_targ**2 + radfrac_unc**2 + mass_unc**2 )
+    sys_uncertainties.append(sys_unc)
+
+#Plot systematic uncertainty as function of mass
+fig, ax = plt.subplots(figsize=(40,20))
+plt.plot(masses, sys_uncertainties, marker='o',markersize=10, color='black')
+plt.xlabel('A\' Invariant Mass [MeV]')
+plt.ylabel('Systematic Uncertainty')
+plt.ylim(0.0, 0.2)
+plt.axhline(y=0.05, linestyle='--', color='darkred')
+plt.axhline(y=0.1, linestyle='--', color='darkred')
+plt.text(160, 0.15, 'HPS Preliminary\n(incomplete)', horizontalalignment='center')
+plt.savefig(f'{outdir}/systematic_uncertainty_summary.png')
+
+#Rescale the expected signal according to the systematic uncertainty
+values = expected_signal_h.values()
+rescaled_values = np.zeros_like(values)
+for m, mass in enumerate(masses):
+    rescaled_values[m, :] = values[m, :] * (1.0-sys_uncertainties[m])
+rescaled_signal_h = expected_signal_h.copy()
+rescaled_signal_h[...]
= rescaled_values + +#Divide rescaled signal by upper limit to get exclusion contour +new_exclusion = rescaled_values/excluded_signal_h.values() +rescaled_exclusion_h = exclusion_h.copy() +rescaled_exclusion_h[...] = new_exclusion + +#Apply systematics to expected signal... +fig, ax = plt.subplots(figsize=(40,30)) +rescaled_signal_h.plot() +plt.xlim(50,180) +plt.ylim(-6.5, -4.0) +plt.xlabel('A\' Invariant Mass [MeV]', fontsize=80) +plt.ylabel('$\log{\epsilon^2}$', fontsize=80) +plt.text(150, -4.4,f'HPS PRELIMINARY\n(Partial Systematics)', color='white', weight='bold', fontsize=80, horizontalalignment='center') +plt.text(156, -3.97,'Expected Signal', fontsize=80) +plt.savefig(f'{outdir}/expected_signal_2d.png') + +#save upper limit +fig, ax = plt.subplots(figsize=(40,30)) +excluded_signal_h.plot() +plt.xlim(50,180) +plt.ylim(-6.5, -4.0) +plt.xlabel('A\' Invariant Mass [MeV]', fontsize=80) +plt.ylabel('$\log{\epsilon^2}$', fontsize=80) +plt.text(165, -3.97,'Upper Limit', fontsize=80) +plt.text(150, -4.4,f'HPS PRELIMINARY', color='white', weight='bold', fontsize=80, horizontalalignment='center') +plt.savefig(f'{outdir}/upper_limit_2d.png') + +#Plot exclusion contour with systematics +fig, ax = plt.subplots(figsize=(40,30)) +rescaled_exclusion_h.plot(cmin=0.0) +plt.xlim(50,180) +plt.ylim(-6.5, -4.0) +plt.xlabel('A\' Invariant Mass [MeV]', fontsize=80) +plt.ylabel('$\log{\epsilon^2}$', fontsize=80) +plt.text(150, -4.4,f'HPS PRELIMINARY\n(Partial Systematics)', color='white', weight='bold', fontsize=80, horizontalalignment='center') +recenters_x = rescaled_exclusion_h.axes[0].centers +recenters_y = rescaled_exclusion_h.axes[1].centers +revalues = rescaled_exclusion_h.values() +reX, reY = np.meshgrid(recenters_x, recenters_y) +contour_levels = [1.0] +colors = ['red'] +recontour = ax.contour(reX, reY, revalues.T, levels=contour_levels, colors=colors) # Transpose values for correct orientation +ax.clabel(recontour, inline=1, fontsize=60) +plt.savefig(f'{outdir}/exclusion_contour_2d.png') + +#Write exclusion contour to .dat files +import csv +for l,level in enumerate(contour_levels): + for line in range(len(recontour.allsegs[l])): + print(line) + with open(f'{outdir}/exclusion_mpifpi_{args.mpifpi}_contour_line{line+1}.dat', 'w', newline='') as csvfile: + contour_line = recontour.allsegs[l][line] + data = [[x[0], np.sqrt(10**x[1])] for x in contour_line] + data.append([contour_line[0][0], np.sqrt(10**contour_line[0][1])]) + writer = csv.writer(csvfile, delimiter=' ') + writer.writerows(data) diff --git a/plotUtils/simps/run_opt_interval.py b/plotUtils/simps/run_opt_interval.py index a8b84ec6d..b125270a2 100644 --- a/plotUtils/simps/run_opt_interval.py +++ b/plotUtils/simps/run_opt_interval.py @@ -119,31 +119,31 @@ def kLargestIntervals(list_of_energies, spectrumCDF = lambda x: x): #make histos to store results exclusion_conf_h = ( hist.Hist.new - .Reg(len(masses), np.min(masses),np.max(masses),label='v_{D} Invariant Mass [MeV]') + .Reg(len(masses)-1, np.min(masses),np.max(masses),label='v_{D} Invariant Mass [MeV]') .Reg(len(eps2_range), min_eps,max_eps,label=r'$log10(\epsilon^2)$') .Double() ) exclusion_bestk_h = ( hist.Hist.new - .Reg(len(masses), np.min(masses),np.max(masses),label='v_{D} Invariant Mass [MeV]') + .Reg(len(masses)-1, np.min(masses),np.max(masses),label='v_{D} Invariant Mass [MeV]') .Reg(len(eps2_range), min_eps,max_eps,label=r'$log10(\epsilon^2)$') .Double() ) total_yield_h = ( hist.Hist.new - .Reg(len(masses), np.min(masses),np.max(masses),label='v_{D} Invariant Mass [MeV]') + 
.Reg(len(masses)-1, np.min(masses),np.max(masses),label='v_{D} Invariant Mass [MeV]') .Reg(len(eps2_range), min_eps,max_eps,label=r'$log10(\epsilon^2)$') .Double() ) excluded_signal_h = ( hist.Hist.new - .Reg(len(masses), np.min(masses),np.max(masses),label='v_{D} Invariant Mass [MeV]') + .Reg(len(masses)-1, np.min(masses),np.max(masses),label='v_{D} Invariant Mass [MeV]') .Reg(len(eps2_range), min_eps,max_eps,label=r'$log10(\epsilon^2)$') .Double() ) sensitivity_h = ( hist.Hist.new - .Reg(len(masses), np.min(masses),np.max(masses),label='v_{D} Invariant Mass [MeV]') + .Reg(len(masses)-1, np.min(masses),np.max(masses),label='v_{D} Invariant Mass [MeV]') .Reg(len(eps2_range), min_eps,max_eps,label=r'$log10(\epsilon^2)$') .Double() ) @@ -171,13 +171,13 @@ def kLargestIntervals(list_of_energies, spectrumCDF = lambda x: x): #Plot the excluded signal value right before reaching 90% confidence. Debugging purposes excluded_signal_minus1_h = ( hist.Hist.new - .Reg(len(masses), np.min(masses),np.max(masses),label='v_{D} Invariant Mass [MeV]') + .Reg(len(masses)-1, np.min(masses),np.max(masses),label='v_{D} Invariant Mass [MeV]') .Reg(len(eps2_range), min_eps,max_eps,label=r'$log10(\epsilon^2)$') .Double() ) exclusion_conf_minus1_h = ( hist.Hist.new - .Reg(len(masses), np.min(masses),np.max(masses),label='v_{D} Invariant Mass [MeV]') + .Reg(len(masses)-1, np.min(masses),np.max(masses),label='v_{D} Invariant Mass [MeV]') .Reg(len(eps2_range), min_eps,max_eps,label=r'$log10(\epsilon^2)$') .Double() ) diff --git a/plotUtils/simps/run_signal_search.py b/plotUtils/simps/run_signal_search.py index 6b743e592..2c04b6666 100644 --- a/plotUtils/simps/run_signal_search.py +++ b/plotUtils/simps/run_signal_search.py @@ -48,7 +48,7 @@ outfilename = f'{outfilename}_100pct' #Load 100% data print('Loading 100% Data') - inv_mass_range = (30,200) + inv_mass_range = (30,124) branches = ["unc_vtx_mass","unc_vtx_psum", "unc_vtx_ele_track_z0", "unc_vtx_pos_track_z0", "unc_vtx_z", "unc_vtx_proj_sig"] indir = '/fs/ddn/sdf/group/hps/users/alspellm/data_storage/pass4kf/pass4kf_ana_20240513' mass_safety = 'unc_vtx_mass*1000. 
>= 0' diff --git a/plotUtils/simps/simp_signal_2016.py b/plotUtils/simps/simp_signal_2016.py index 330745c88..2f6588826 100644 --- a/plotUtils/simps/simp_signal_2016.py +++ b/plotUtils/simps/simp_signal_2016.py @@ -6,12 +6,12 @@ import uproot import math import ROOT as r -import matplotlib as mpl +#import matplotlib as mpl import copy -import mplhep -import matplotlib.pyplot as plt -from matplotlib.backends.backend_pdf import PdfPages -mpl.style.use(mplhep.style.ROOT) +#import mplhep +#import matplotlib.pyplot as plt +#from matplotlib.backends.backend_pdf import PdfPages +#mpl.style.use(mplhep.style.ROOT) import argparse from simp_theory_equations import SimpEquations as simpeqs @@ -40,6 +40,9 @@ def __init__(self, mpifpi=4*np.pi, nsigma=1.5): self.trident_differential_production = None + def set_radiative_acceptance(self, *coefficients): + self.radiative_acceptance = self.polynomial(*coefficients) + @staticmethod def polynomial(*coefficients): def _implementation(x): @@ -175,10 +178,13 @@ def get_exp_sig_eps2(self, signal_mass, signal_array, eps2): rho_decay_weight = (np.exp((self.target_pos - signal_array.vd_true_vtx_z) / rho_gctau) / rho_gctau) phi_decay_weight = (np.exp((self.target_pos - signal_array.vd_true_vtx_z) / phi_gctau) / phi_gctau) + signal_array['reweighted_accxEff_rho'] = simpeqs.br_Vrho_pi(mass_ap, mass_pid, signal_mass, self.alpha_dark, fpid)*rho_decay_weight*signal_array.event_weight_by_uniform_z*signal_array.psum_reweight + print(simpeqs.br_Vrho_pi(mass_ap, mass_pid, signal_mass, self.alpha_dark, fpid)) + signal_array['reweighted_accxEff_phi'] = simpeqs.br_Vphi_pi(mass_ap, mass_pid, signal_mass, self.alpha_dark, fpid)*phi_decay_weight*signal_array.event_weight_by_uniform_z*signal_array.psum_reweight combined_decay_weight = ( - rho_decay_weight * simpeqs.br_Vrho_pi(mass_ap, mass_pid, signal_mass, self.alpha_dark, fpid) - + phi_decay_weight * simpeqs.br_Vphi_pi(mass_ap, mass_pid, signal_mass, self.alpha_dark, fpid) + (rho_decay_weight * simpeqs.br_Vrho_pi(mass_ap, mass_pid, signal_mass, self.alpha_dark, fpid)) + + (phi_decay_weight * simpeqs.br_Vphi_pi(mass_ap, mass_pid, signal_mass, self.alpha_dark, fpid)) ) # the weight for a single event is the chance of that decay (z and gamma from either Vd) # multiplied by probability the event was from that z-bin in the original sample @@ -240,7 +246,7 @@ def sameside_z0_cut(array): @staticmethod def zcut_sel(array): sel = ( - (array.unc_vtx_z > -4.3) + (array.unc_vtx_z > -4.8) ) return sel @@ -303,6 +309,50 @@ def cnvHistoToROOT(histo, tempname='temporary_uproot'): infile.Close() return root_hist + def systematic_uncertainties(self): + #using a +- 1.5 sigma search window with all cuts frozen to 2016 displaced simp dissertation (Alic) + self.radacc_targetz_nominal = self.polynomial(0.24083419, -0.017612076, 0.00037553660, -1.0223921e-06, -3.8793240e-08, + 4.2199609e-10, -1.6641414e-12, 2.3433278e-15) + self.radacc_targetz_Mpt5 = self.polynomial(0.22477846, -0.015984559, 0.00030943435, 3.6182165e-07, -5.4820194e-08, + 5.2531952e-10, -2.0102027e-12, 2.8109430e-15) + self.radacc_targetz_Ppt5 = self.polynomial( 0.22779999, -0.016020742, 0.00029960205, 7.6823260e-07, -6.0956281e-08, + 5.6914810e-10, -2.1602258e-12, 3.0094146e-15) + self.simp_targetz = self.polynomial(-1.38077250e+00, 8.00749424e-02, -9.78327706e-04, 5.13294008e-06, -9.77393492e-09) + self.mass_unc = 0.043 + self.radfrac = 0.07 + + def evaluate_polynomials(self, mass): + nominal_values = self.radacc_targetz_nominal(mass) + Mpt5_values = self.radacc_targetz_Mpt5(mass) + 
Ppt5_values = self.radacc_targetz_Ppt5(mass) + simp_values = self.simp_targetz(mass) + + return nominal_values, Mpt5_values, Ppt5_values, simp_values, self.mass_unc, self.radfrac + + @staticmethod + def inject_signal_mc(signal, data, nevents=100): + #Find the maximum signal weight + max_weight = np.max(signal.expected_signal_weight) + events_thrown = 0 + thrown_mask = [] + #sample signal until requested nevents thrown + while events_thrown < nevents: + #Randomly select a signal event + rint = np.random.randint(0,len(signal.expected_signal_weight)-1) + random_event = signal[rint] + #Randomly sample the weight distribution. If the sampled weight < event weight, throw the event + rweight = np.random.uniform(0, max_weight) + if rweight < random_event.expected_signal_weight: + events_thrown += 1 + thrown_mask.append(rint) + + thrown_events = signal[thrown_mask] + thrown_events['weight'] = 1.0 + + #combine mass and min z0 into array and inject into data + injected_data = ak.concatenate([data, thrown_events]) + return injected_data, thrown_events + if __name__ == '__main__': parser = argparse.ArgumentParser(description='Process some inputs.') @@ -334,9 +384,14 @@ def cnvHistoToROOT(histo, tempname='temporary_uproot'): processor.set_diff_prod_lut(cr_data, preselection, signal_mass_range, tenpct, full_lumi_path) #Initialize the range of epsilon2 - masses = [x for x in range(30,124,2)] - masses = [x for x in range(50,70,2)] - ap_masses = [round(x*processor.mass_ratio_ap_to_vd,1) for x in masses] + mass_max = 50 + mass_min = 30 + mass_step = 2 + ap_step = round(mass_step*processor.mass_ratio_ap_to_vd,1) + masses = np.array([x for x in range(mass_min, mass_max+mass_step, mass_step)]) + ap_masses = np.array([round(x*processor.mass_ratio_ap_to_vd,1) for x in masses]) + print(masses) + print(ap_masses) eps2_range = np.logspace(-4.0,-8.0,num=40) logeps2_range = np.log10(eps2_range) min_eps = min(np.log10(eps2_range)) @@ -346,18 +401,17 @@ def cnvHistoToROOT(histo, tempname='temporary_uproot'): #Define all histograms expected_signal_vd_h = ( hist.Hist.new - .Reg(len(masses), np.min(masses), np.max(masses), label='Vd Invariant Mass [MeV]') + .Reg(len(masses), np.min(masses), np.max(masses)+mass_step, label='Vd Invariant Mass [MeV]') .Reg(num_bins, min_eps, max_eps,label=f'$log_{10}(\epsilon^2)$') .Double() ) expected_signal_ap_h = ( hist.Hist.new - .Reg(len(ap_masses), np.min(ap_masses), np.max(ap_masses), label='A\' Invariant Mass [MeV]') + .Reg(len(ap_masses), np.min(ap_masses), np.max(ap_masses)+ap_step, label='A\' Invariant Mass [MeV]') .Reg(num_bins, min_eps, max_eps,label=f'$log_{10}(\epsilon^2)$') .Double() ) - for signal_mass in masses: #Load MC Signal From 939b492d2628eb691f02610057a14825003e8f98 Mon Sep 17 00:00:00 2001 From: Alic Shen Spellman Date: Wed, 4 Sep 2024 15:59:58 -0700 Subject: [PATCH 21/27] adding systematic scripts --- .../simps/systematics/minz0_systematic.py | 286 ++++++++++++ ...tive_acceptance_systematic-misalignment.py | 410 ++++++++++++++++++ .../radiative_acceptance_systematic.py | 284 ++++++++++++ .../signal_misalignment_systematic.py | 84 ++++ .../signal_target_position_systematic.py | 271 ++++++++++++ .../simps/systematics/v0projsig_systematic.py | 113 +++++ 6 files changed, 1448 insertions(+) create mode 100644 plotUtils/simps/systematics/minz0_systematic.py create mode 100644 plotUtils/simps/systematics/radiative_acceptance_systematic-misalignment.py create mode 100644 plotUtils/simps/systematics/radiative_acceptance_systematic.py create mode 100644 
plotUtils/simps/systematics/signal_misalignment_systematic.py create mode 100644 plotUtils/simps/systematics/signal_target_position_systematic.py create mode 100644 plotUtils/simps/systematics/v0projsig_systematic.py diff --git a/plotUtils/simps/systematics/minz0_systematic.py b/plotUtils/simps/systematics/minz0_systematic.py new file mode 100644 index 000000000..7fb84ec02 --- /dev/null +++ b/plotUtils/simps/systematics/minz0_systematic.py @@ -0,0 +1,286 @@ +#!/usr/bin/python3 +import os +import awkward as ak +import numpy as np +import hist +from hist import Hist +import uproot +import ROOT as r +import copy +import matplotlib.pyplot as plt +import matplotlib as mpl +import mplhep +import matplotlib.gridspec as gridspec +import sys +import math + +hpstr_base = os.getenv('HPSTR_BASE') +sys.path.append(f'{hpstr_base}/plotUtils/simps') +import simp_signal_2016 + +#format mpl plots +plt.rcParams.update({'font.size': 40, # Font size for text + 'axes.titlesize': 40, # Font size for titles + 'axes.labelsize': 40, # Font size for axis labels + 'xtick.labelsize': 40, # Font size for x-axis tick labels + 'ytick.labelsize': 40, # Font size for y-axis tick labels + 'lines.linewidth':3.0, + 'legend.fontsize': 40}) # Font size for legend +plt.rcParams['font.family'] = 'DejaVu Sans' + +import argparse +parser = argparse.ArgumentParser(description='') +parser.add_argument('--outdir', type=str, default='./search_results') +parser.add_argument('--mpifpi', type=float, default=4.*np.pi) + +args = parser.parse_args() +outdir = args.outdir +################################################################################################################################ +def get_rand(x=False): + if x: + return np.random.uniform(80,120)*0.01 + else: + return 1.0 + +def fit_root_gaussian(histo, fix_xmin=None, fix_xmax=None, max_attempts=50, nsigma=2.0): + if fix_xmin: + xmin = fix_xmin + else: + xmin = histo.GetXaxis().GetXmin() + if fix_xmax: + xmax = fix_xmax + else: + xmax = histo.GetXaxis().GetXmax() + + # Set initial guesses for Gaussian parameters + gaussian = r.TF1("gaussian", "gaus", xmin, xmax) + mean = histo.GetMean() + sigma = histo.GetRMS() + norm = histo.GetMaximum() + gaussian.SetParameters(norm, mean, sigma) + + #initial fit + fit_result = histo.Fit(gaussian, "ES", "", xmin, xmax) + params = fit_result.Parameters() + chi2 = fit_result.Chi2() + ndf = fit_result.Ndf() + best_chi2 = chi2/ndf + best_params = params + + rand = False + for attempt in range(max_attempts): + gaussian.SetParameters(best_params[0]*get_rand(rand), best_params[1]*get_rand(rand), best_params[2]*get_rand(rand)) + if not fix_xmin: + xmin = best_params[1] - nsigma*(best_params[2]) + if not fix_xmax: + xmax = best_params[1] + nsigma*(best_params[2]) + + fit_result = histo.Fit(gaussian, "QES", "", xmin, xmax) + + # Check if fit was successful + if not fit_result.IsValid(): + rand = True + continue + + params = fit_result.Parameters() + chi2 = fit_result.Chi2() + ndf = fit_result.Ndf() + if chi2/ndf < best_chi2: + best_chi2 = chi2/ndf + best_params = params + rand = False + else: + rand = True + + gaussian.SetParameters(best_params[0], best_params[1], best_params[2]) + if not fix_xmin: + xmin = best_params[1] - nsigma*(best_params[2]) + if not fix_xmax: + xmax = best_params[1] + nsigma*(best_params[2]) + fit_result = histo.Fit(gaussian, "QES", "", xmin, xmax) + #params = np.round(fit_result.Parameters(),4) + #errors = np.round(fit_result.Errors(),4) + params = fit_result.Parameters() + errors = fit_result.Errors() + chi2 = 
fit_result.Chi2() + ndf = fit_result.Ndf() + + params = np.array([float(f"{x:.2e}") for x in params]) + errors = np.array([float(f"{x:.2e}") for x in errors]) + + return histo, params, errors, chi2/ndf + +def cnv_root_to_np(histo): + nbins = histo.GetNbinsX() + xvals = np.array([histo.GetBinCenter(x+1) for x in range(nbins+1)]) + yvals = np.array([histo.GetBinContent(x+1) for x in range(nbins+1)]) + errors = np.array([histo.GetBinError(x+1) for x in range(nbins+1)]) + underflow = histo.GetBinContent(0) + overflow = histo.GetBinContent(nbins+1) + + #add over/underflow + xvals = np.insert(xvals, 0, xvals[0]-histo.GetBinWidth(1)) + yvals = np.insert(yvals, 0, underflow) + xvals = np.append(xvals, xvals[-1]+histo.GetBinWidth(1)) + yvals = np.append(yvals, overflow) + errors = np.insert(errors, 0, 0.0) + errors = np.append(errors, 0.0) + + #get fit function if it exist + x_fit = None + y_fit = None + if len(histo.GetListOfFunctions()) > 0: + fitfunc = histo.GetListOfFunctions()[0] + x_fit = np.linspace(fitfunc.GetXmin(), fitfunc.GetXmax(), int((fitfunc.GetXmax()-fitfunc.GetXmin())/histo.GetBinWidth(1))) + y_fit = np.array([fitfunc.Eval(x) for x in x_fit]) + + return (xvals, yvals, errors), (x_fit, y_fit) + +################################################################################################################################### + +#Load signal processor +search_window = 1.5 #used in final search +signalProcessor = simp_signal_2016.SignalProcessor(args.mpifpi, search_window) + +#Read in data, MC, and signal +samples = {} +branches = ["unc_vtx_ele_track_z0","unc_vtx_pos_track_z0"] + +#Read 10% Data +infile = '/sdf/group/hps/user-data/alspellm/2016/data/hadd_BLPass4c_1959files.root' +selection = 'vtxana_Tight_L1L1_nvtx1' +samples['data'] = signalProcessor.load_data(infile,selection, expressions=branches, cut_expression='((unc_vtx_psum > 1.0) & (unc_vtx_psum < 1.9) )') +samples['data']['weight'] = 1.0 #Assign weight of 10 to scale up to full lumi + +#Load MC background +lumi = 10.7*.1 #pb-1 +mc_scale = {'data' : 1.0, + 'tritrig' : 1.416e9*lumi/(50000*10000), + 'wab' : 0.1985e12*lumi/(100000*10000)} + +#tritrig +infile = '/sdf/group/hps/user-data/alspellm/2016/tritrig_mc/pass4b/hadded_tritrig-beam-10kfiles-ana-smeared-corr_beamspotfix.root' +samples['tritrig'] = signalProcessor.load_data(infile, selection, cut_expression='((unc_vtx_psum > 1.0) & (unc_vtx_psum < 1.9) )', expressions=branches) +samples['tritrig']['weight'] = mc_scale['tritrig'] + +#wab +infile = '/sdf/group/hps/user-data/alspellm/2016/wab_mc/pass4b/hadded_wab-beam-10kfiles-ana-smeared-corr_beamspotfix.root' +samples['wab'] = signalProcessor.load_data(infile, selection, cut_expression='((unc_vtx_psum > 1.0) & (unc_vtx_psum < 1.9) )', expressions=branches) +samples['wab']['weight'] = mc_scale['wab'] + +#After smearing factor has been calculated, set to true to compare z0 distributions with smeared MC background +smear = True +if smear: + mc_sigma = 0.1251 + data_sigma = 0.1348 + smearF = np.sqrt(data_sigma**2 - mc_sigma**2) + + #smear tritrig + rel_smear = np.random.normal(0.0, 1.0, len(samples['tritrig'].unc_vtx_min_z0)) + smearfactors = rel_smear*smearF + samples['tritrig']['unc_vtx_ele_track_z0'] = smearfactors + samples['tritrig']['unc_vtx_ele_track_z0'] + rel_smear = np.random.normal(0.0, 1.0, len(samples['tritrig'].unc_vtx_min_z0)) + smearfactors = rel_smear*smearF + samples['tritrig']['unc_vtx_pos_track_z0'] = smearfactors + samples['tritrig']['unc_vtx_pos_track_z0'] + + #smear wab + rel_smear = np.random.normal(0.0, 
1.0, len(samples['wab'].unc_vtx_min_z0)) + smearfactors = rel_smear*smearF + samples['wab']['unc_vtx_ele_track_z0'] = smearfactors + samples['wab']['unc_vtx_ele_track_z0'] + rel_smear = np.random.normal(0.0, 1.0, len(samples['wab'].unc_vtx_min_z0)) + smearfactors = rel_smear*smearF + samples['wab']['unc_vtx_pos_track_z0'] = smearfactors + samples['wab']['unc_vtx_pos_track_z0'] + + +#Plot z0 for data and MC backgrounds +z0_h = ( + hist.Hist.new + .StrCategory(list(samples.keys()), name='samples') + .Reg(200, -1.,1.,label=r'Vertical Track Impact Parameter [mm]') + .Double() +) + +#Fill without weights, so that histos can be converted to ROOT and retain statistical uncertainty +z0_histos = {} +for sname, sample in samples.items(): + z0_h.fill(sname, sample.unc_vtx_ele_track_z0)#, weight=sample.weight/ak.sum(sample.weight)) + z0_h.fill(sname, sample.unc_vtx_pos_track_z0)#, weight=sample.weight/ak.sum(sample.weight)) + z0_histos[sname] = signalProcessor.cnvHistoToROOT(z0_h[sname,:]) + z0_histos[sname].Scale(mc_scale[sname]) + +#Scale Tritrig and WAB and combine with proper errors +z0_histos['tritrig_wab'] = z0_histos['tritrig'].Clone() +z0_histos['tritrig_wab'].Add(z0_histos['wab']) +#Normalize +for sname, sample in z0_histos.items(): + print(z0_histos[sname].Integral(0,-1)) + z0_histos[sname].Scale(1./z0_histos[sname].Integral(0,-1)) + +#Make plots of data vs MC background +fig, ax = plt.subplots(2,1, figsize=(25,30)) +#Data +plt.subplot(2,1,1) +plt.xlabel('Vertical Track Impact Parameter [mm]') +plt.ylabel('Normalized Events') +_, params, fiterrors, _ = fit_root_gaussian(z0_histos['data'], nsigma=1.5) +(xvals, yvals, errors), (x_fit, y_fit) = cnv_root_to_np(z0_histos['data']) +plt.errorbar(xvals, yvals, yerr=errors, marker='o', color='black', linestyle='', label='10% Data') +plt.plot(x_fit, y_fit, color='red', label='Fit 10% Data', linewidth=2.0) +plt.text(0.25, 0.015, f'Norm={params[0]}$\pm${fiterrors[0]} \n$\mu$={params[1]}$\pm${fiterrors[1]} \n$\sigma$={params[2]}$\pm${fiterrors[2]}') +plt.legend() +#MC +plt.subplot(2,1,2) +plt.xlabel('Vertical Track Impact Parameter [mm]') +plt.ylabel('Normalized Events') +_, params, fiterrors, _ = fit_root_gaussian(z0_histos['tritrig_wab'], nsigma=1.5) +(xvals, yvals, errors), (x_fit, y_fit) = cnv_root_to_np(z0_histos['tritrig_wab']) +plt.errorbar(xvals, yvals, yerr=errors, marker='o', color='darkblue', linestyle='', label='MC Background') +plt.plot(x_fit, y_fit, color='red', label='Fit MC Background', linewidth=2.0) +plt.text(0.25, 0.015, f'Norm={params[0]}$\pm${fiterrors[0]} \n$\mu$={params[1]}$\pm${fiterrors[1]} \n$\sigma$={params[2]}$\pm${fiterrors[2]}') +plt.legend() +plt.ylim(0.0, 0.03) +plt.savefig(f'{outdir}/impact_parameter_data_v_mc_smeared_{smear}.png') + +####################################################################################################################################### + +#Smear the signal using ratio of data and MC widths +sysvals = [] +masses = [] +indir = '/sdf/group/hps/user-data/alspellm/2016/simp_mc/pass4b/beam/smeared_fixbeamspot' +for mass in range(30,120,4): + masses.append(mass) + signal_pre_readout_path = lambda mass: f'/sdf/group/hps/user-data/alspellm/2016/simp_mc/pass4b/nobeam/mass_{mass}_simp_2pt3_slic_hadd_ana.root' + signal_path = lambda mass: f'{indir}/mass_{mass}_hadd-simp-beam_ana_smeared_corr.root' + signal_selection = 'vtxana_radMatchTight_2016_simp_SR_analysis' + signal = signalProcessor.load_signal(signal_path(signal_mass), signal_pre_readout_path(signal_mass), signal_mass, signal_selection) + 
signal['weight']=1.0 + psum_sel = signalProcessor.psum_sel(signal, case='sr') + + #smearing factors calculated from comparing data and MC bkg z0 widths + mc_sigma = 0.1251 + data_sigma = 0.1348 + smearF = np.sqrt(data_sigma**2 - mc_sigma**2) + + #smear signal minz0 + rel_smear = np.random.normal(0.0, 1.0, len(signal.unc_vtx_min_z0)) + smearfactors = rel_smear*smearF + signal['unc_vtx_ele_track_z0_smeared'] = smearfactors + signal['unc_vtx_ele_track_z0'] + + rel_smear = np.random.normal(0.0, 1.0, len(signal.unc_vtx_min_z0)) + smearfactors = rel_smear*smearF + signal['unc_vtx_pos_track_z0_smeared'] = smearfactors + signal['unc_vtx_pos_track_z0'] + + #calculate smeared minz0 + signal['unc_vtx_min_z0_smeared'] = np.minimum(abs(signal['unc_vtx_ele_track_z0_smeared']), abs(signal['unc_vtx_pos_track_z0_smeared'])) + + #Calculate change in efficiency + unsmeared_sel = signal.unc_vtx_min_z0 > signalProcessor.minz0_cut_poly(signal.unc_vtx_mass*1000.) + smeared_sel = signal.unc_vtx_min_z0_smeared > signalProcessor.minz0_cut_poly(signal.unc_vtx_mass*1000.) + unsmeared_eff = ak.sum(signal[unsmeared_sel].weight)/ak.sum(signal.weight) + smeared_eff = ak.sum(signal[smeared_sel].weight)/ak.sum(signal.weight) + systematic = smeared_eff/unsmeared_eff + sysvals.append(systematic) + + #final systematic stored in sysvals + print(sysvals) + diff --git a/plotUtils/simps/systematics/radiative_acceptance_systematic-misalignment.py b/plotUtils/simps/systematics/radiative_acceptance_systematic-misalignment.py new file mode 100644 index 000000000..6237578cb --- /dev/null +++ b/plotUtils/simps/systematics/radiative_acceptance_systematic-misalignment.py @@ -0,0 +1,410 @@ +#!/usr/bin/env python +# coding: utf-8 + +# In[27]: + + +import os +import awkward as ak +import numpy as np +import hist +from hist import Hist +import uproot +import ROOT as r +import copy + +import matplotlib.pyplot as plt +import matplotlib as mpl +import mplhep +import matplotlib.gridspec as gridspec + +import sys +sys.path.append('/sdf/group/hps/user-data/alspellm/2016/plotting') +import hps_plot_utils as utils + +get_ipython().run_line_magic('matplotlib', 'inline') +mpl.style.use(mplhep.style.ROOT) +import math +import pickle + +sys.path.append('/sdf/home/a/alspellm/src/hpstr_v62208/plotUtils/simps') +import simp_signal_2016 +from simp_theory_equations import SimpEquations as simpeqs +import copy +# Set global font sizes +plt.rcParams.update({'font.size': 40, # Font size for text + 'axes.titlesize': 40, # Font size for titles + 'axes.labelsize': 40, # Font size for axis labels + 'xtick.labelsize': 40, # Font size for x-axis tick labels + 'ytick.labelsize': 40, # Font size for y-axis tick labels + 'lines.linewidth':3.0, + 'legend.fontsize': 40}) # Font size for legend +plt.rcParams['font.family'] = 'DejaVu Sans' + +def cnv_tgraph_to_np(tgraph): + # Number of points in the TGraph + npoints = tgraph.GetN() + + # Retrieve X and Y values + xvals = np.array([tgraph.GetX()[i] for i in range(npoints)]) + yvals = np.array([tgraph.GetY()[i] for i in range(npoints)]) + + # Errors are not directly available in TGraph; setting them to zero + errors = np.zeros(npoints) + + # Handle fit function if it exists + x_fit = None + y_fit = None + if len(tgraph.GetListOfFunctions()) > 0: + fitfunc = tgraph.GetListOfFunctions()[0] + x_fit = np.linspace(fitfunc.GetXmin(), fitfunc.GetXmax(), 100) # 100 points for the fit + y_fit = np.array([fitfunc.Eval(x) for x in x_fit]) + + return (xvals, yvals, errors), (x_fit, y_fit) + +def cnv_root_to_np(histo): + nbins = 
histo.GetNbinsX() + xvals = np.array([histo.GetBinCenter(x+1) for x in range(nbins+1)]) + yvals = np.array([histo.GetBinContent(x+1) for x in range(nbins+1)]) + errors = np.array([histo.GetBinError(x+1) for x in range(nbins+1)]) + underflow = histo.GetBinContent(0) + overflow = histo.GetBinContent(nbins+1) + + #add over/underflow + xvals = np.insert(xvals, 0, xvals[0]-histo.GetBinWidth(1)) + yvals = np.insert(yvals, 0, underflow) + xvals = np.append(xvals, xvals[-1]+histo.GetBinWidth(1)) + yvals = np.append(yvals, overflow) + errors = np.insert(errors, 0, 0.0) + errors = np.append(errors, 0.0) + + #get fit function if it exist + x_fit = None + y_fit = None + if len(histo.GetListOfFunctions()) > 0: + fitfunc = histo.GetListOfFunctions()[0] + x_fit = np.linspace(fitfunc.GetXmin(), fitfunc.GetXmax(), int((fitfunc.GetXmax()-fitfunc.GetXmin())/histo.GetBinWidth(1))) + y_fit = np.array([fitfunc.Eval(x) for x in x_fit]) + + return (xvals, yvals, errors), (x_fit, y_fit) + +def fit_plot_with_poly(plot, tgraph=False, specify_n=None, set_xrange=False, xrange=(0.0, 1.0)): + polys = [] + chi2s = [] + fstats = [] + fit_resultults = [] + + if tgraph: + npoints = plot.GetN() + else: + npoints = 0 + nBins = plot.GetNbinsX() + for ibin in range(nBins): + if plot.GetBinContent(ibin) > 0: + npoints += 1 + pass + + + if not specify_n: + for n in range(12): + fitfunc = r.TF1(f'pol{n}',f'pol{n}') + fitfunc.SetLineColor(r.kRed) + if set_xrange: + fitfunc.SetRange(xrange[0], xrange[1]) + fit_result = plot.Fit(fitfunc,"RSQ") + else: + fit_result = plot.Fit(fitfunc,"SQ") + fitfunc.SetLineColor(r.kRed) + fitfunc.SetMarkerSize(0.0) + chi2s.append(fit_result.Chi2()) + polys.append(n) + fit_resultults.append(fit_result) + + #Perform fstat test to see how much fit improves with additional order (why does this work?) + if n > 0: + fstats.append( (chi2s[n-1]-chi2s[n])*(npoints-n-1)/(chi2s[n])) + else: + fstats.append(0.0) + + print(fstats) + return None, None + else: + fitfunc = r.TF1(f'pol{specify_n}',f'pol{specify_n}') + fitfunc.SetLineColor(r.kRed) + fitfunc.SetLineWidth(5) + if set_xrange: + fitfunc.SetRange(xrange[0], xrange[1]) + fit_result = plot.Fit(fitfunc,"RSQ") + else: + fit_result = plot.Fit(fitfunc,"SQ") + #params = np.round(fit_result.Parameters(),4) + #errors = np.round(fit_result.Errors(),4) + params = fit_result.Parameters() + errors = fit_result.Errors() + #return fit_result + return params, errors + +def polynomial(*coefficients): + def _implementation(x): + return sum([ + coefficient * x**power + for power, coefficient in enumerate(coefficients) + ]) + return _implementation + + +# In[3]: + + +signalProcessor = simp_signal_2016.SignalProcessor(np.pi*4., 1.5) +#V0 Projection Significance Data vs MC efficiency + +samples = {} +mcsamples = {} +branches = ["unc_vtx_mass", "unc_vtx_psum"] + +#LOAD NOMINAL +#rad +infile = '/sdf/group/hps/user-data/alspellm/2016/systematics/radacc/misalignments/hadd_2kfiles_rad_nobeam_nominal_recon_ana.root' +selection = 'vtxana_radMatchTight_nocuts' #USE RADMATCHTIGHT! 
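[Editor's sketch] For orientation on what this script computes: the radiative acceptance is the reconstructed, radiative-matched invariant-mass spectrum divided by the generated (mcAna mc622Mass) spectrum, and the misalignment systematic is taken from the ratio of the misaligned to the nominal acceptance. The arrays below are made-up placeholders for illustration only, not values from the analysis.

import numpy as np

# made-up per-mass-bin counts, illustration only
recon_nominal = np.array([120., 340., 560.])       # reconstructed rad-matched events, nominal geometry
gen_nominal = np.array([4000., 5200., 6100.])      # generated radiative events (mcAna)
recon_misaligned = np.array([110., 330., 540.])
gen_misaligned = np.array([4000., 5200., 6100.])

acc_nominal = recon_nominal / gen_nominal              # radiative acceptance, nominal
acc_misaligned = recon_misaligned / gen_misaligned     # radiative acceptance, misaligned
systematic_ratio = acc_misaligned / acc_nominal        # deviation from 1.0 -> systematic uncertainty
print(systematic_ratio)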
+samples['nominal'] = signalProcessor.load_data(infile, selection, cut_expression='((unc_vtx_psum > 1.9) & (unc_vtx_psum < 2.4) )', expressions=branches) +#mc ana +infile = '/sdf/group/hps/user-data/alspellm/2016/systematics/radacc/misalignments/hadd_2kfiles_rad_nobeam_nominal_mc_ana.root' +slicfile = r.TFile(infile, "READ") +mcsamples['nominal'] = copy.deepcopy(slicfile.Get('mcAna/mcAna_mc622Mass_h')) +slicfile.Close() + +#LOAD MISALIGNED +#rad +infile = '/sdf/group/hps/user-data/alspellm/2016/systematics/radacc/misalignments/hadd_2kfiles_rad_nobeam_misalignments_1_recon_ana.root' +selection = 'vtxana_radMatchTight_nocuts' #USE RADMATCHTIGHT! +samples['misaligned_v1'] = signalProcessor.load_data(infile, selection, cut_expression='((unc_vtx_psum > 1.9) & (unc_vtx_psum < 2.4) )', expressions=branches) +#mc ana +infile = '/sdf/group/hps/user-data/alspellm/2016/systematics/radacc/misalignments/hadd_2kfiles_rad_nobeam_misalignments_1_mc_ana.root' +slicfile = r.TFile(infile, "READ") +mcsamples['misaligned_v1'] = copy.deepcopy(slicfile.Get('mcAna/mcAna_mc622Mass_h')) +slicfile.Close() + + +# In[4]: + + +outfile = uproot.recreate('radacc_misalignment_systematic_results.root') + + +# In[26]: + + +#plot radiative peak +psum_h = ( + hist.Hist.new + .StrCategory(list(samples.keys()), name='samples') + .Reg(50,1.9, 2.4,label='Psum [GeV]') + .Double() +) +colors=['black', 'darkred'] +fig, ax = plt.subplots(figsize=(25,15)) +for i,(sname, sample) in enumerate(samples.items()): + psum_h.fill(sname, sample.unc_vtx_psum) + #xvals = psum_h[sname,:].axes[0].centers + #yvals = psum_h[sname,:].values() + #plt.plot(xvals, yvals, color=colors[i],marker='o', markersize=20, mew=3, linestyle='',label=sname) +psum_h.plot(color=['black','darkred'], linewidth=3.0) +plt.legend() +plt.ylabel('Events') +plt.savefig('radiative_peak_misaligned.png') + + +# In[ ]: + + + + + +# In[102]: + + +nbinsx = mcsamples['nominal'].GetNbinsX() +first_bin = mcsamples['nominal'].GetBinLowEdge(1) +last_bin = nbinsx*mcsamples['nominal'].GetBinWidth(1) +invmass_h = ( + hist.Hist.new + .StrCategory(list(samples.keys()), name='samples') + .Reg(nbinsx,first_bin,last_bin,label='Invariant Mass [MeV]') + .Double() +) + +#Fill without weights, so that histos can be converted to ROOT and retain statistical uncertainty +invmass_histos = {} +for sname, sample in samples.items(): + invmass_h.fill(sname, sample.unc_vtx_mass*1000.) 
+ invmass_histos[sname] = signalProcessor.cnvHistoToROOT(invmass_h[sname,:]) + invmass_histos[sname].Rebin(2) + mcsamples[sname].Rebin(2) + + +# In[103]: + + +def nonUniBinning(histo, start, size): + edges_a = np.arange(histo.GetBinLowEdge(1),start+histo.GetBinWidth(1),histo.GetBinWidth(1)) + edges_b = np.arange(start,histo.GetBinLowEdge(histo.GetNbinsX()), size) + bin_edges = np.concatenate([edges_a, edges_b[1:]]) + histo_rebinned = r.TH1F(f'{histo.GetName()}_rebinned', f'{histo.GetTitle()}', len(bin_edges)-1, bin_edges) + for bin in range(1, histo.GetNbinsX() + 1): + content = histo.GetBinContent(bin) + center = histo.GetBinCenter(bin) + error = histo.GetBinError(bin) + new_bin = histo_rebinned.FindBin(center) + histo_rebinned.SetBinContent(new_bin, histo_rebinned.GetBinContent(new_bin)+content) + histo_rebinned.SetBinError(new_bin, np.sqrt(histo_rebinned.GetBinError(new_bin)**2 + error**2)) + return histo_rebinned +#nonUniBinning(invmass_histos['nominal'], 150, 5) +for sname, sample in samples.items(): + invmass_histos[sname] = nonUniBinning(invmass_histos[sname], 150, 4) + mcsamples[sname] = nonUniBinning(mcsamples[sname], 150, 4) + outfile[f'recon_{sname}'] = invmass_histos[sname] + outfile[f'mc_{sname}'] = mcsamples[sname] + + +# In[110]: + + +#calculate radiative acceptance +fits = {} +colors = ['#d62728', '#bcbd22', '#2ca02c', '#17becf', '#1f77b4', '#9467bd', '#7f7f7f'] +colors = ['black', 'darkred', 'darkblue', 'darkgreen', 'darkorange'] +fig, ax = plt.subplots(2,1,figsize=(20,20)) +plt.subplot(2,1,1) +plt.xlabel('Invariant Mass [MeV]') +plt.ylabel('Radiative Acceptance') +plt.ylim(0.0, .15) +plt.xlim(20.0,206.0) +for i,(sname, histo) in enumerate(invmass_histos.items()): + ratio = invmass_histos[sname].Clone() + ratio.Divide(mcsamples[sname]) + fitparams, _ = fit_plot_with_poly(ratio, specify_n=7, set_xrange=True, xrange=(30.0, 220.0)) + outfile[f'rad_acc_{sname}'] = ratio + print(sname,fitparams) + #fit_plot_with_poly(ratio, set_xrange=True, xrange=(30.0, 220.0)) + (xvals, yvals, errors), (x_fit, y_fit) = cnv_root_to_np(ratio) + fits[sname] = (x_fit, y_fit) + plt.errorbar(xvals, yvals, yerr=errors, linestyle='', marker='o', color=colors[i], label=sname) + plt.plot(x_fit, y_fit, linewidth=3.0, color=colors[i]) +plt.legend(fontsize=20) +#plot the real radiative acceptance (includes beam) +#radacc_off = polynomial(-0.48922505, 0.073733061, -0.0043873158, 0.00013455495, -2.3630535e-06, 2.5402516e-08, -1.7090900e-10, 7.0355585e-13, -1.6215982e-15, 1.6032317e-18) +#plt.plot(xvals, radacc_off(xvals), label='rad+beam', marker='o', color='blue') + + +plt.subplot(2,1,2) +fit_ratio = fits['misaligned_v1'][1]/fits['nominal'][1] +xvalues = fits['nominal'][0] +plt.plot(xvalues, fit_ratio, color='black', marker = '+', mew=5) +plt.axhline(y=1.0, linestyle='--', color='black') +plt.axhline(y=0.8, linestyle='--', color='black') +plt.xlim(20.0,206.) 
+plt.ylim(0.6,1.1) +plt.xlabel('A\' Invariant Mass [MeV]') +plt.ylabel('Systematic Uncertainty') + +plt.savefig('radiative_acceptance_misalignment.png') + + +# In[105]: + + +sys_gr = r.TGraph(len(xvalues), xvalues, fit_ratio) +print(xvalues) +params_sys, errors_sys = fit_plot_with_poly(sys_gr, tgraph=True, specify_n = 9, set_xrange=True, xrange=(50.0, 220.0)) +print(params_sys) +(xvals, yvals, errors), (x_fit, y_fit) = cnv_tgraph_to_np(sys_gr) +fig, ax = plt.subplots(figsize=(20,10)) +plt.plot(xvals, yvals, marker='+', mew=3, markersize=10, color='darkblue') +plt.plot(x_fit, y_fit, linewidth=3.0, color='red') +test = polynomial(-8.7913353, 0.61710096, -0.014554635, 0.00011685881, 1.3328346e-06, -4.2065138e-08, 4.6959958e-10, -2.9405730e-12, 1.0885979e-14, -2.2317805e-17, 1.9584455e-20) +outfile['misalignment_systematic'] = sys_gr + + +# In[29]: + + +#Signal misalignment using the nominal no-beam radiative acceptance +infile = '/sdf/group/hps/user-data/alspellm/2016/systematics/simp_radacc_misalignment_v1/simp_systematic_nominal.root' +with uproot.open(infile) as f: + nominal_h = f['expected_signal_ap_h'].to_hist() + +infile = '/sdf/group/hps/user-data/alspellm/2016/systematics/simp_radacc_misalignment_v1/simp_systematic_misaligned.root' +with uproot.open(infile) as f: + misaligned_h = f['expected_signal_ap_h'].to_hist() + ratio_h = f['expected_signal_ap_h'].to_hist().reset() +outfile['expected_signal_nominal'] = nominal_h +outfile['expected_signal_misaligned'] = misaligned_h + +nominal_h.plot() +plt.show() +misaligned_h.plot() +plt.show() + +#take ratio of densities, misaligned to nominal +ratio = misaligned_h.values()/nominal_h.values() +# Find where nominal_h values are less than 1.0 +mask = nominal_h.values() < 1.0 +# Set corresponding ratio values to 0 +ratio[mask] = 0 + +xbins = ratio_h.axes[0].centers +ybins = ratio_h.axes[1].centers +xgrid, ygrid = np.meshgrid(xbins, ybins, indexing='ij') +ratio_h.reset() +ratio_h.fill(xgrid.flatten(), ygrid.flatten(), weight=ratio.flatten()) +outfile['signal_systematic_ratio'] = ratio_h + +fig, ax = plt.subplots(figsize=(25,15)) +ratio_h.plot(cmin=np.min(ratio.flatten()[ratio.flatten()>0.0]), cmax=np.max(ratio.flatten())) +plt.savefig('simp_radacc_misaligned_v1.png') + + +# In[66]: + + +#Combine radiative acceptance and signal systematics +#For each A' invariant mass, take the largest value, since this is in the numerator and reduces the expected signal rate +sigsys_y = [] +sigsys_x = [] +for xbin,mass in enumerate(ratio_h.axes[0].centers): + sigsys = np.max(ratio_h.values()[xbin]) + if sigsys == 0.0: + continue + sigsys_x.append(mass) + sigsys_y.append(sigsys) + +sigsys_gr = r.TGraph(len(sigsys_x), np.array(sigsys_x), np.array(sigsys_y)) +params_sigsys, errors_sigsys = fit_plot_with_poly(sigsys_gr, tgraph=True, specify_n = 5, set_xrange=True, xrange=(sigsys_x[0], sigsys_x[-1])) +#params_sigsys, errors_sigsys = fit_plot_with_poly(sigsys_gr, tgraph=True, set_xrange=True, xrange=(sigsys_x[0], sigsys_x[-1])) +print(params_sigsys) +(sigsys_x, sigsys_y, sigsys_errors), (sigsys_xfit, sigsys_yfit) = cnv_tgraph_to_np(sigsys_gr) +plt.plot(sigsys_x, sigsys_y) +plt.plot(sigsys_xfit, sigsys_yfit) +#Looks like we should just choose the overall maximum... 
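# (editorial sketch) Since only a single overall maximum is kept below, the same constant
# could be read directly off the graph points instead of the fitted curve -- equivalent
# whenever the fit tracks the points closely:
#   sigsys_final_from_points = float(np.max(sigsys_y))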
+sigsys_final = np.max(sigsys_yfit) +print(f'Signal misalignment acceptance systematic: {sigsys_final}') + + +# In[93]: + + +#Combine the signal and radiative acceptance systematics +radsys_fitpoly = polynomial(-10.307720, 0.97578691, -0.036585723, 0.00077903787, -1.0393704e-05, 9.0187487e-08, -5.0948313e-10, 1.8078746e-12, -3.6566050e-15, 3.2111742e-18) +masses = np.array([float(x) for x in range(60,230,1)]) +#Divide signal systematic by radiative acceptance +misalignmentsys = sigsys_final/radsys_fitpoly(masses) +fig, ax = plt.subplots(figsize=(25,15)) +plt.plot(masses, misalignmentsys, marker='+', markersize=10, mew=3, color='black') + + +# In[ ]: + + + + diff --git a/plotUtils/simps/systematics/radiative_acceptance_systematic.py b/plotUtils/simps/systematics/radiative_acceptance_systematic.py new file mode 100644 index 000000000..14adcb640 --- /dev/null +++ b/plotUtils/simps/systematics/radiative_acceptance_systematic.py @@ -0,0 +1,284 @@ +#!/usr/bin/env python +# coding: utf-8 + +# In[1]: + + +import os +import awkward as ak +import numpy as np +import hist +from hist import Hist +import uproot +import ROOT as r +import copy + +import matplotlib.pyplot as plt +import matplotlib as mpl +import mplhep +import matplotlib.gridspec as gridspec + +import sys +sys.path.append('/sdf/group/hps/user-data/alspellm/2016/plotting') +import hps_plot_utils as utils + +get_ipython().run_line_magic('matplotlib', 'inline') +mpl.style.use(mplhep.style.ROOT) +import math +import pickle + +sys.path.append('/sdf/home/a/alspellm/src/hpstr_v62208/plotUtils/simps') +import simp_signal_2016 +from simp_theory_equations import SimpEquations as simpeqs +import copy +# Set global font sizes +plt.rcParams.update({'font.size': 50, # Font size for text + 'axes.titlesize': 50, # Font size for titles + 'axes.labelsize': 50, # Font size for axis labels + 'xtick.labelsize': 50, # Font size for x-axis tick labels + 'ytick.labelsize': 50, # Font size for y-axis tick labels + 'lines.linewidth':4.0, + 'legend.fontsize': 50}) # Font size for legend +plt.rcParams['font.family'] = 'DejaVu Sans' + +def cnv_root_to_np(histo): + nbins = histo.GetNbinsX() + xvals = np.array([histo.GetBinCenter(x+1) for x in range(nbins+1)]) + yvals = np.array([histo.GetBinContent(x+1) for x in range(nbins+1)]) + errors = np.array([histo.GetBinError(x+1) for x in range(nbins+1)]) + underflow = histo.GetBinContent(0) + overflow = histo.GetBinContent(nbins+1) + + #add over/underflow + xvals = np.insert(xvals, 0, xvals[0]-histo.GetBinWidth(1)) + yvals = np.insert(yvals, 0, underflow) + xvals = np.append(xvals, xvals[-1]+histo.GetBinWidth(1)) + yvals = np.append(yvals, overflow) + errors = np.insert(errors, 0, 0.0) + errors = np.append(errors, 0.0) + + #get fit function if it exist + x_fit = None + y_fit = None + if len(histo.GetListOfFunctions()) > 0: + fitfunc = histo.GetListOfFunctions()[0] + x_fit = np.linspace(fitfunc.GetXmin(), fitfunc.GetXmax(), int((fitfunc.GetXmax()-fitfunc.GetXmin())/histo.GetBinWidth(1))) + y_fit = np.array([fitfunc.Eval(x) for x in x_fit]) + + return (xvals, yvals, errors), (x_fit, y_fit) + +def fit_plot_with_poly(plot, tgraph=False, specify_n=None, set_xrange=False, xrange=(0.0, 1.0)): + polys = [] + chi2s = [] + fstats = [] + fit_resultults = [] + + if tgraph: + npoints = plot.GetN() + else: + npoints = 0 + nBins = plot.GetNbinsX() + for ibin in range(nBins): + if plot.GetBinContent(ibin) > 0: + npoints += 1 + pass + + + if not specify_n: + for n in range(11): + fitfunc = r.TF1(f'pol{n}',f'pol{n}') + 
fitfunc.SetLineColor(r.kRed) + if set_xrange: + fitfunc.SetRange(xrange[0], xrange[1]) + fit_result = plot.Fit(fitfunc,"RSQ") + else: + fit_result = plot.Fit(fitfunc,"SQ") + fitfunc.SetLineColor(r.kRed) + fitfunc.SetMarkerSize(0.0) + chi2s.append(fit_result.Chi2()) + polys.append(n) + fit_resultults.append(fit_result) + + #Perform fstat test to see how much fit improves with additional order (why does this work?) + if n > 0: + fstats.append( (chi2s[n-1]-chi2s[n])*(npoints-n-1)/(chi2s[n])) + else: + fstats.append(0.0) + + print(fstats) + return None, None + else: + fitfunc = r.TF1(f'pol{specify_n}',f'pol{specify_n}') + fitfunc.SetLineColor(r.kRed) + fitfunc.SetLineWidth(5) + if set_xrange: + fitfunc.SetRange(xrange[0], xrange[1]) + fit_result = plot.Fit(fitfunc,"RSQ") + else: + fit_result = plot.Fit(fitfunc,"SQ") + params = fit_result.Parameters() + errors = fit_result.Errors() + #return fit_result + return params, errors + + +# In[2]: + + +signalProcessor = simp_signal_2016.SignalProcessor(np.pi*4., 1.5) +#V0 Projection Significance Data vs MC efficiency + +samples = {} +mcsamples = {} +branches = ["unc_vtx_mass"] + +#LOAD NOMINAL RAD + BEAM +#rad+beam +infile = '/sdf/group/hps/user-data/alspellm/2016/rad_mc/pass4b/rad_beam/rad-beam-hadd-10kfiles-ana-smeared-corr.root' +selection = 'vtxana_radMatchTight_nocuts' #USE RADMATCHTIGHT! +samples['nominal_beam'] = signalProcessor.load_data(infile, selection, cut_expression='((unc_vtx_psum > 1.9) & (unc_vtx_psum < 2.4) )', expressions=branches) +#mc ana +infile = '/sdf/group/hps/user-data/alspellm/2016/rad_mc/pass4b/rad_nobeam/rad_nobeam_slic_hadd10ktuples_ana.root' +slicfile = r.TFile(infile, "READ") +mcsamples['nominal_beam'] = copy.deepcopy(slicfile.Get('mcAna/mcAna_mc622Mass_h')) +slicfile.Close() + + +#LOAD NOMINAL RAD + BEAM Mpt5 +#rad+beam +infile = '/sdf/group/hps/user-data/alspellm/2016/systematics/radacc/hadd_1937files_rad_beam_targetz_Mpt5_recon_ana.root' +selection = 'vtxana_radMatchTight_nocuts' #USE RADMATCHTIGHT! +samples['targetz_Mpt5_beam'] = signalProcessor.load_data(infile, selection, cut_expression='((unc_vtx_psum > 1.9) & (unc_vtx_psum < 2.4) )', expressions=branches) +#mc ana +infile = '/sdf/group/hps/user-data/alspellm/2016/systematics/radacc/hadd_1937files_rad_beam_targetz_Mpt5_mc_ana.root' +slicfile = r.TFile(infile, "READ") +mcsamples['targetz_Mpt5_beam'] = copy.deepcopy(slicfile.Get('mcAna/mcAna_mc622Mass_h')) +slicfile.Close() + +#LOAD NOMINAL RAD + BEAM Ppt5 +#rad+beam +infile = '/sdf/group/hps/user-data/alspellm/2016/systematics/radacc/hadd_1937files_rad_beam_targetz_Ppt5_recon_ana.root' +selection = 'vtxana_radMatchTight_nocuts' #USE RADMATCHTIGHT! +samples['targetz_Ppt5_beam'] = signalProcessor.load_data(infile, selection, cut_expression='((unc_vtx_psum > 1.9) & (unc_vtx_psum < 2.4) )', expressions=branches) +#mc ana +infile = '/sdf/group/hps/user-data/alspellm/2016/systematics/radacc/hadd_1937files_rad_beam_targetz_Ppt5_mc_ana.root' +slicfile = r.TFile(infile, "READ") +mcsamples['targetz_Ppt5_beam'] = copy.deepcopy(slicfile.Get('mcAna/mcAna_mc622Mass_h')) +slicfile.Close() + +''' +#LOAD NOMINAL RAD NO BEAM +infile = '/sdf/group/hps/user-data/alspellm/2016/systematics/radacc/hadd_1999files_rad_nobeam_nominal_recon_ana.root' +selection = 'vtxana_radMatchTight_nocuts' #USE RADMATCHTIGHT! 
+samples['nominal_nobeam'] = signalProcessor.load_data(infile, selection, cut_expression='((unc_vtx_psum > 1.9) & (unc_vtx_psum < 2.4) )', expressions=branches) +#mc ana +infile = '/sdf/group/hps/user-data/alspellm/2016/systematics/radacc/hadd_2kfiles_rad_nobeam_nominal_mc_ana.root' +slicfile = r.TFile(infile, "READ") +mcsamples['nominal_nobeam'] = copy.deepcopy(slicfile.Get('mcAna/mcAna_mc622Mass_h')) +slicfile.Close() +''' + + +# In[ ]: + + + + + +# In[42]: + + +nbinsx = mcsamples['nominal_beam'].GetNbinsX() +first_bin = mcsamples['nominal_beam'].GetBinLowEdge(1) +last_bin = nbinsx*mcsamples['nominal_beam'].GetBinWidth(1) +invmass_h = ( + hist.Hist.new + .StrCategory(list(samples.keys()), name='samples') + .Reg(nbinsx,first_bin,last_bin,label='Invariant Mass [MeV]') + .Double() +) + +#Fill without weights, so that histos can be converted to ROOT and retain statistical uncertainty +invmass_histos = {} +for sname, sample in samples.items(): + invmass_h.fill(sname, sample.unc_vtx_mass*1000.) + invmass_histos[sname] = signalProcessor.cnvHistoToROOT(invmass_h[sname,:]) + invmass_histos[sname].Rebin(2) + mcsamples[sname].Rebin(2) + + +# In[43]: + + +def nonUniBinning(histo, start, size): + edges_a = np.arange(histo.GetBinLowEdge(1),start+histo.GetBinWidth(1),histo.GetBinWidth(1)) + edges_b = np.arange(start,histo.GetBinLowEdge(histo.GetNbinsX()), size) + bin_edges = np.concatenate([edges_a, edges_b[1:]]) + histo_rebinned = r.TH1F(f'{histo.GetName()}_rebinned', f'{histo.GetTitle()}', len(bin_edges)-1, bin_edges) + for bin in range(1, histo.GetNbinsX() + 1): + content = histo.GetBinContent(bin) + center = histo.GetBinCenter(bin) + error = histo.GetBinError(bin) + new_bin = histo_rebinned.FindBin(center) + histo_rebinned.SetBinContent(new_bin, histo_rebinned.GetBinContent(new_bin)+content) + histo_rebinned.SetBinError(new_bin, np.sqrt(histo_rebinned.GetBinError(new_bin)**2 + error**2)) + return histo_rebinned +#nonUniBinning(invmass_histos['nominal'], 150, 5) +#for sname, sample in samples.items(): +# invmass_histos[sname] = nonUniBinning(invmass_histos[sname], 150, 4) +# mcsamples[sname] = nonUniBinning(mcsamples[sname], 150, 4) + + +# In[45]: + + +#calculate radiative acceptance +fits = {} +colors = ['#d62728', '#bcbd22', '#2ca02c', '#17becf', '#1f77b4', '#9467bd', '#7f7f7f'] +colors = ['black', 'darkred', 'darkblue', 'darkgreen', 'darkorange'] +fig, ax = plt.subplots(2,1,figsize=(35,25),gridspec_kw={'height_ratios': [3, 2]}) +plt.subplot(2,1,1) +plt.xlabel('A\' Invariant Mass [MeV]') +plt.ylabel('Radiative Acceptance') +labels = ['Nominal (-4.3 mm)', '-4.8 mm', '-3.8 mm'] +for i,(sname, histo) in enumerate(invmass_histos.items()): + ratio = invmass_histos[sname].Clone() + ratio.Divide(mcsamples[sname]) + fit_params,_ = fit_plot_with_poly(ratio, specify_n=7, set_xrange=True, xrange=(30.0, 220.0)) + print(sname, fit_params) + (xvals, yvals, errors), (x_fit, y_fit) = cnv_root_to_np(ratio) + plt.errorbar(xvals, yvals, yerr=errors, linestyle='', marker='o', color=colors[i], label=labels[i]) + plt.plot(x_fit, y_fit, linewidth=3.0, color=colors[i]) + fits[sname] = (x_fit, y_fit) +plt.ylim(0.0, .15) +plt.xlim(30.0,220.0) +plt.legend(fontsize=50) +plt.text(53,0.125,'MC Radiative Tridents\n + Beam', horizontalalignment='center', fontsize=40) + +plt.subplot(2,1,2) +fit_ratio_Mpt5 = fits['targetz_Mpt5_beam'][1]/fits['nominal_beam'][1] +fit_ratio_Ppt5 = fits['targetz_Ppt5_beam'][1]/fits['nominal_beam'][1] +xvalues = fits['nominal_beam'][0] +plt.plot(xvalues, fit_ratio_Mpt5, color='darkred', marker='o', 
label='-4.8 mm : Nominal') +plt.plot(xvalues, fit_ratio_Ppt5, color='darkblue', marker='o', label='-3.8 mm : Nominal') +plt.axhline(y=1.0, linestyle='--', color='black') +plt.axhline(y=1.05, linestyle='--', color='black') +plt.xlim(30.0,220.) +plt.ylim(0.85,1.075) +plt.xlabel('A\' Invariant Mass [MeV]') +plt.ylabel('Ratio') +plt.legend() + +plt.savefig('radiative_acceptance_target_deltaz.png') + + #c = r.TCanvas('f{sname}', 'f{sname}', 2000, 1000) + #c.cd() + #ratio.Draw() + #c.Draw() + + +# In[ ]: + + + + diff --git a/plotUtils/simps/systematics/signal_misalignment_systematic.py b/plotUtils/simps/systematics/signal_misalignment_systematic.py new file mode 100644 index 000000000..af9515918 --- /dev/null +++ b/plotUtils/simps/systematics/signal_misalignment_systematic.py @@ -0,0 +1,84 @@ +#!/usr/bin/env python +# coding: utf-8 + +# In[1]: + + +import os +import awkward as ak +import numpy as np +import hist +from hist import Hist +import uproot +import ROOT as r +import copy + +import matplotlib.pyplot as plt +import matplotlib as mpl +import mplhep +import matplotlib.gridspec as gridspec + +import sys +sys.path.append('/sdf/group/hps/user-data/alspellm/2016/plotting') +import hps_plot_utils as utils + +get_ipython().run_line_magic('matplotlib', 'inline') +mpl.style.use(mplhep.style.ROOT) +import math +import pickle + +sys.path.append('/sdf/home/a/alspellm/src/hpstr_v62208/plotUtils/simps') +import simp_signal_2016 +from simp_theory_equations import SimpEquations as simpeqs +import copy +# Set global font sizes +plt.rcParams.update({'font.size': 40, # Font size for text + 'axes.titlesize': 40, # Font size for titles + 'axes.labelsize': 40, # Font size for axis labels + 'xtick.labelsize': 40, # Font size for x-axis tick labels + 'ytick.labelsize': 40, # Font size for y-axis tick labels + 'lines.linewidth':3.0, + 'legend.fontsize': 40}) # Font size for legend +plt.rcParams['font.family'] = 'DejaVu Sans' + + +# In[ ]: + + +infile = '/sdf/group/hps/user-data/alspellm/2016/systematics/simps/expected_signals/simp_systematic_nominal.root' +with uproot.open(infile) as f: + nominal_h = f['expected_signal_ap_h'].to_hist() + +infile = '/sdf/group/hps/user-data/alspellm/2016/systematics/simps/expected_signals/simp_systematic_misaligned.root' +with uproot.open(infile) as f: + misaligned_h = f['expected_signal_ap_h'].to_hist() + ratio_h = f['expected_signal_ap_h'].to_hist().reset() + +nominal_h.plot() +plt.show() +misaligned_h.plot() +plt.show() + +#take ratio of densities, misaligned to nominal +ratio = misaligned_h.values()/nominal_h.values() +# Find where nominal_h values are less than 1.0 +mask = nominal_h.values() < 1.0 +# Set corresponding ratio values to 0 +ratio[mask] = 0 + + +xbins = ratio_h.axes[0].centers +ybins = ratio_h.axes[1].centers +xgrid, ygrid = np.meshgrid(xbins, ybins, indexing='ij') +ratio_h.reset() +ratio_h.fill(xgrid.flatten(), ygrid.flatten(), weight=ratio.flatten()) +fig, ax = plt.subplots(figsize=(25,15)) +ratio_h.plot(cmin=0.0, cmax=np.max(ratio.flatten())) + + + +# In[ ]: + + +# + diff --git a/plotUtils/simps/systematics/signal_target_position_systematic.py b/plotUtils/simps/systematics/signal_target_position_systematic.py new file mode 100644 index 000000000..3d9c16dbd --- /dev/null +++ b/plotUtils/simps/systematics/signal_target_position_systematic.py @@ -0,0 +1,271 @@ +#!/usr/bin/env python +# coding: utf-8 + +# In[76]: + + +import os +import awkward as ak +import numpy as np +import hist +from hist import Hist +import uproot +import ROOT as r +import copy + +import 
matplotlib.pyplot as plt +import matplotlib as mpl +import mplhep +import matplotlib.gridspec as gridspec + +import sys +sys.path.append('/sdf/group/hps/user-data/alspellm/2016/plotting') +import hps_plot_utils as utils + +get_ipython().run_line_magic('matplotlib', 'inline') +mpl.style.use(mplhep.style.ROOT) +import math +import pickle + +sys.path.append('/sdf/home/a/alspellm/src/hpstr_v62208/plotUtils/simps') +import simp_signal_2016 +from simp_theory_equations import SimpEquations as simpeqs +import copy +# Set global font sizes +plt.rcParams.update({'font.size': 40, # Font size for text + 'axes.titlesize': 40, # Font size for titles + 'axes.labelsize': 40, # Font size for axis labels + 'xtick.labelsize': 40, # Font size for x-axis tick labels + 'ytick.labelsize': 40, # Font size for y-axis tick labels + 'lines.linewidth':3.0, + 'legend.fontsize': 40}) # Font size for legend +plt.rcParams['font.family'] = 'DejaVu Sans' + + +# In[2]: + + +samples = {} +branches = ['unc_vtx_ele_track_z0','unc_vtx_pos_track_z0', 'unc_vtx_z', 'unc_vtx_mass', 'unc_vtx_proj_sig'] +infile = '/sdf/group/hps/user-data/alspellm/2016/systematics/simps/target_unc/Mpt5/hadd_simp_mass_60_nobeam_target_Mpt5_recon_ana.root' +infile = '/sdf/group/hps/user-data/alspellm/2016/systematics/simps/target_unc/Mpt5/hadd_simp_mass_60_nobeam_target_Mpt5_recon_ana.root' +infile = '/sdf/group/hps/user-data/alspellm/2016/systematics/simp_target_unc/nov0proj/nominal.root' +signalProcessor = simp_signal_2016.SignalProcessor(np.pi*4., 1.5) +selection = 'vtxana_radMatchTight_2016_simp_SR_analysis' #USE RADMATCHTIGHT! +samples['Mpt5'] = signalProcessor.load_data(infile, selection, cut_expression='((unc_vtx_psum > 1.0) & (unc_vtx_psum < 1.9) )', expressions=branches) +samples['Mpt5']['weight'] = 1.0 + +infile = '/sdf/group/hps/user-data/alspellm/2016/systematics/simps/nominal/hadd_simp_mass_60_nobeam_nominal_recon_ana.root' +signalProcessor = simp_signal_2016.SignalProcessor(np.pi*4., 1.5) +samples['nominal'] = signalProcessor.load_data(infile, selection, cut_expression='((unc_vtx_psum > 1.0) & (unc_vtx_psum < 1.9) )', expressions=branches) +samples['nominal']['weight'] = 1.0 + + +# In[ ]: + + +z0_h = ( + hist.Hist.new + .StrCategory(list(samples.keys()), name='samples') + .Reg(60,-3.0, 3.0,label='Track y0 [mm]') + .Double() +) +for sname, sample in samples.items(): + z0_h.fill(sname, samples[sname].unc_vtx_ele_track_z0) + z0_h.fill(sname, samples[sname].unc_vtx_pos_track_z0) +fig, ax = plt.subplots(figsize=(20,10)) +z0_h.plot() +plt.legend() + +miny0_h = ( + hist.Hist.new + .StrCategory(list(samples.keys()), name='samples') + .Reg(60,0, 3.0,label='Min y0 [mm]') + .Double() +) +for sname, sample in samples.items(): + miny0_h.fill(sname, samples[sname].unc_vtx_min_z0) + +fig, ax = plt.subplots(figsize=(20,10)) +miny0_h.plot() +nomsum = ak.sum(samples['nominal'].weight) +mpt5sum = ak.sum(samples['Mpt5'].weight) +mpt5sum/nomsum + + +# In[36]: + + +vprojsig_h = ( + hist.Hist.new + .StrCategory(list(samples.keys()), name='samples') + .Reg(100,0, 20.0,label='vprojsig') + .Double() +) +for sname, sample in samples.items(): + vprojsig_h.fill(sname, samples[sname].unc_vtx_proj_sig) +vprojsig_h.plot() + + +# In[ ]: + + + + + +# In[33]: + + +nominal_mask = signalProcessor.minz0_sel(samples['nominal']) +Mpt5_mask = signalProcessor.minz0_sel(samples['Mpt5']) +print(len(samples['nominal'][nom_mask].unc_vtx_z)) +print(len(samples['Mpt5'][Mpt5_mask].unc_vtx_z)) + + +# In[181]: + + +infile = 
'/sdf/group/hps/user-data/alspellm/2016/systematics/simp_target_unc/nov0proj/nominal.root' +with uproot.open(infile) as f: + nominal_h = f['expected_signal_ap_h'].to_hist() + test_h = f['expected_signal_vd_h'].to_hist() + +infile = '/sdf/group/hps/user-data/alspellm/2016/systematics/simp_target_unc/nov0proj/target_Mpt5.root' +with uproot.open(infile) as f: + Mpt5_h = f['expected_signal_ap_h'].to_hist() + ratio_Mpt5_h = f['expected_signal_ap_h'].to_hist().reset() + +infile = '/sdf/group/hps/user-data/alspellm/2016/systematics/simp_target_unc/nov0proj/target_Ppt5.root' +with uproot.open(infile) as f: + Ppt5_h = f['expected_signal_ap_h'].to_hist() + ratio_Ppt5_h = f['expected_signal_ap_h'].to_hist().reset() + + +# In[182]: + + +nominal_h.plot() +plt.show() +test_h.plot() +plt.show() +Mpt5_h.plot() +plt.show() +Ppt5_h.plot() + + +# In[79]: + + +#take ratio of densities, misaligned to nominal +ratio_Mpt5 = Mpt5_h.values()/nominal_h.values() +mask = nominal_h.values() < 0.0 +ratio_Mpt5[mask] = 0 + +xbins = ratio_Mpt5_h.axes[0].centers +ybins = ratio_Mpt5_h.axes[1].centers +xgrid, ygrid = np.meshgrid(xbins, ybins, indexing='ij') +ratio_Mpt5_h.reset() +ratio_Mpt5_h.fill(xgrid.flatten(), ygrid.flatten(), weight=ratio_Mpt5.flatten()) +fig, ax = plt.subplots(figsize=(25,15)) +#ratio_Mpt5_h.plot(cmin=0.9, cmax=np.max(ratio_Mpt5.flatten())) +ratio_Mpt5_h.plot(cmin=0.8, cmax=1.2, cmap='RdYlBu') + + +# In[80]: + + +#take ratio of densities, misaligned to nominal +ratio_Ppt5 = Ppt5_h.values()/nominal_h.values() +mask = nominal_h.values() < 0.0 +ratio_Ppt5[mask] = 0 + +xbins = ratio_Ppt5_h.axes[0].centers +ybins = ratio_Ppt5_h.axes[1].centers +xgrid, ygrid = np.meshgrid(xbins, ybins, indexing='ij') +ratio_Ppt5_h.reset() +ratio_Ppt5_h.fill(xgrid.flatten(), ygrid.flatten(), weight=ratio_Ppt5.flatten()) +fig, ax = plt.subplots(figsize=(25,15)) +ratio_Ppt5_h.plot(cmin=0.8, cmax=1.2, cmap='seismic') + + +# In[184]: + + +##### take ratio of densities, misaligned to nominal +ratio_PM = Ppt5_h.values()/Mpt5_h.values() +#mask = Ppt5_h.values() < 0.5 +#ratio_PM[mask] = 0 + +xbins = ratio_Ppt5_h.axes[0].centers +ybins = ratio_Ppt5_h.axes[1].centers +xgrid, ygrid = np.meshgrid(xbins, ybins, indexing='ij') +ratio_Ppt5_h.reset() +ratio_Ppt5_h.fill(xgrid.flatten(), ygrid.flatten(), weight=ratio_PM.flatten()) +fig, ax = plt.subplots(figsize=(25,15)) +ratio_Ppt5_h.plot(cmin=0.90, cmax=1.1, cmap='seismic') +plt.text(124, -5.4,'Expected Signal Ratio\n Between Off-Nominal Targets' , horizontalalignment='center') +plt.ylim(-6.25, -4.7) +#plt.xlim(79.7,199.2) +plt.xlim(50.0,210.0) +plt.ylabel(r'$\log{\epsilon^2}$', fontsize=50) +plt.xlabel('A\' Invariant Mass [MeV]') +plt.savefig('signal_target_uncertainty_offnominal_ratio_2d.png') + +masses = [] +minvalues = [] +for m, mass in enumerate(ratio_Ppt5_h.axes[0].centers): + values = ratio_Ppt5_h.values()[m] + ybins = ratio_Ppt5_h.axes[1].centers + mask = np.where((ybins > -6.25) & (ybins < -4.7) ) + values_masked = values[mask] + minv = np.min(values_masked) + if not np.isfinite(minv): + continue + masses.append(mass) + minvalues.append(minv) + +coefficients = np.polyfit(masses, minvalues, 4) +print(coefficients) +fitfunc = np.poly1d(coefficients) +xfit = np.linspace(min(masses), max(masses),100) +yfit = fitfunc(xfit) + +fig, ax = plt.subplots(figsize=(25,15)) +plt.scatter(masses, minvalues, s=800, marker='+', color='black', label='Minimum Ratio') +plt.plot(xfit, yfit, color='darkred', label='4th-Order Fit') +plt.axhline(y=1.0, linestyle='--', color='black') +plt.ylim(0.5,1.2) 
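# ---------------------------------------------------------------------------------------------------------------------
# [Editor's note] Illustrative sketch, not part of the original analysis: the cells above repeat one pattern several
# times -- divide the bin values of two 2D expected-signal histograms, zero out bins where the denominator has too
# little yield, and refill an empty copy of the histogram through a meshgrid.  A helper along these lines factors
# that out; the 'min_denominator' argument is a stand-in for the hard-coded thresholds (1.0, 0.0, 0.5) used above.
def ratio_hist2d(numerator_h, denominator_h, template_h, min_denominator=1.0):
    """Fill template_h (an empty copy of the inputs) with numerator/denominator, masking sparse denominator bins."""
    den = denominator_h.values()
    ratio = np.divide(numerator_h.values(), den, out=np.zeros_like(den, dtype=float), where=den > 0)
    ratio[den < min_denominator] = 0.0
    xgrid, ygrid = np.meshgrid(template_h.axes[0].centers, template_h.axes[1].centers, indexing='ij')
    template_h.reset()
    template_h.fill(xgrid.flatten(), ygrid.flatten(), weight=ratio.flatten())
    return template_h
# e.g. (hypothetical usage): ratio_PM_h = ratio_hist2d(Ppt5_h, Mpt5_h, ratio_Ppt5_h, min_denominator=0.5)
# ---------------------------------------------------------------------------------------------------------------------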
+plt.xlim(50.0,210.) +plt.xlabel('A\' Invariant Mass [MeV]') +plt.ylabel('Expected Signal Ratio') +plt.legend() +plt.savefig('signal_target_uncertainty_offnominal_v2.png') + + +# In[ ]: + + + + + +# In[180]: + + +import numpy as np +import scipy.stats as stats + +n_sigma = 1.5 +percentage = stats.norm.cdf(n_sigma) - stats.norm.cdf(-n_sigma) +print(percentage) + +n_sigma = 1.5 - (1.5*0.087) +percentage_2 = stats.norm.cdf(n_sigma) - stats.norm.cdf(-n_sigma) +print(percentage_2) +print(1 -(percentage_2/percentage)) + + +# In[147]: + + + + diff --git a/plotUtils/simps/systematics/v0projsig_systematic.py b/plotUtils/simps/systematics/v0projsig_systematic.py new file mode 100644 index 000000000..d38b19439 --- /dev/null +++ b/plotUtils/simps/systematics/v0projsig_systematic.py @@ -0,0 +1,113 @@ +#!/usr/bin/env python +# coding: utf-8 + +# In[1]: + + +import os +import awkward as ak +import numpy as np +import hist +from hist import Hist +import uproot +import ROOT as r +import copy + +import matplotlib.pyplot as plt +import matplotlib as mpl +import mplhep +import matplotlib.gridspec as gridspec + +import sys +sys.path.append('/sdf/group/hps/user-data/alspellm/2016/plotting') +import hps_plot_utils as utils + +get_ipython().run_line_magic('matplotlib', 'inline') +mpl.style.use(mplhep.style.ROOT) +import math +import pickle + +sys.path.append('/sdf/home/a/alspellm/src/hpstr/plotUtils/simps') +import simp_signal_2016 +from simp_theory_equations import SimpEquations as simpeqs +import copy +# Set global font sizes +plt.rcParams.update({'font.size': 60, # Font size for text + 'axes.titlesize': 60, # Font size for titles + 'axes.labelsize': 60, # Font size for axis labels + 'xtick.labelsize': 60, # Font size for x-axis tick labels + 'ytick.labelsize': 60, # Font size for y-axis tick labels + 'lines.linewidth':5.0, + 'legend.fontsize': 60}) # Font size for legend +plt.rcParams['font.family'] = 'DejaVu Sans' + + +# In[3]: + + +signalProcessor = simp_signal_2016.SignalProcessor(np.pi*4., 1.5) +#V0 Projection Significance Data vs MC efficiency + +samples = {} +branches = ["unc_vtx_proj_sig","unc_vtx_ele_track_z0","unc_vtx_pos_track_z0"] + +#Read 10% Data +infile = '/sdf/group/hps/user-data/alspellm/2016/data/hadd_BLPass4c_1959files.root' +selection = 'vtxana_Tight_L1L1_nvtx1' +samples['data'] = signalProcessor.load_data(infile,selection, expressions=branches, cut_expression='((unc_vtx_psum > 1.0) & (unc_vtx_psum < 1.9) )') +samples['data']['weight'] = 1.0 #Assign weight of 10 to scale up to full lumi + +lumi = 10.7*.1 #pb-1 +mc_scale = {'tritrig' : 1.416e9*lumi/(50000*10000), + 'wab' : 0.1985e12*lumi/(100000*10000)} + +#Load tritrig +infile = '/sdf/group/hps/user-data/alspellm/2016/tritrig_mc/pass4b/tritrig-beam-hadd-10kfiles-ana-smeared-corr.root' +infile = '/sdf/group/hps/user-data/alspellm/2016/tritrig_mc/pass4b/hadded_tritrig-beam-10kfiles-ana-smeared-corr_beamspotfix.root' +samples['tritrig'] = signalProcessor.load_data(infile, selection, cut_expression='((unc_vtx_psum > 1.2) & (unc_vtx_psum < 1.9) )', expressions=branches) +samples['tritrig']['weight'] = mc_scale['tritrig'] + +#Load wab +infile = '/sdf/group/hps/user-data/alspellm/2016/wab_mc/pass4b/wab-beam-hadd-10kfiles-ana-smeared-corr.root' +infile = '/sdf/group/hps/user-data/alspellm/2016/wab_mc/pass4b/hadded_wab-beam-10kfiles-ana-smeared-corr_beamspotfix.root' +samples['wab'] = signalProcessor.load_data(infile, selection, cut_expression='((unc_vtx_psum > 1.2) & (unc_vtx_psum < 1.9) )', expressions=branches) +samples['wab']['weight'] = 
mc_scale['wab'] + +#Combine tritrig and wab +samples['tritrig+wab+beam'] = ak.concatenate([samples['tritrig'], samples['wab']]) + + +# In[4]: + + +v0projsig_h = ( + hist.Hist.new + .StrCategory(list(samples.keys()), name='samples') + .Reg(300, 0.0,30,label=r'Target Projected Vertex Significance $[N\sigma_{\text{V0proj}}]$') + .Double() +) + +#Fill without weights, so that histos can be converted to ROOT and retain statistical uncertainty +for sname, sample in samples.items(): + v0projsig_h.fill(sname, sample.unc_vtx_proj_sig, weight=sample.weight/ak.sum(sample.weight)) + +#Events that pass v0projsig in data vs MC bkg +eff_mc = round(v0projsig_h['tritrig+wab+beam',:][:hist.loc(2.0):sum]/v0projsig_h['tritrig+wab+beam',:][::sum],2) +eff_data = round(v0projsig_h['data',:][:hist.loc(2.0):sum]/v0projsig_h['data',:][::sum],2) + +fig, ax = plt.subplots(figsize=(60,40)) +v0projsig_h['data',:].plot(linewidth=3.0, label='10% Data', color='black') +v0projsig_h['tritrig+wab+beam',:].plot(linewidth=3.0, label='Tritrig+WAB+Beam', color='blue') +plt.axvline(x=2.0, linestyle='--', color='red', label='Cut > 2.0') +plt.text(15.0, 3e-3, f'Data Eff: {eff_data}\nMC Bkg Eff: {eff_mc}') +plt.legend() +plt.ylabel('Normalized Events') +plt.yscale('log') +plt.savefig('v0projsig_systematic_lowpsum.png') + + +# In[ ]: + + + + From 314242b1562c429278085935bf7af0da6dc73baf Mon Sep 17 00:00:00 2001 From: Alic Shen Spellman Date: Thu, 5 Sep 2024 09:22:22 -0700 Subject: [PATCH 22/27] documenting --- ...tive_acceptance_systematic-misalignment.py | 353 ++++++------------ 1 file changed, 113 insertions(+), 240 deletions(-) diff --git a/plotUtils/simps/systematics/radiative_acceptance_systematic-misalignment.py b/plotUtils/simps/systematics/radiative_acceptance_systematic-misalignment.py index 6237578cb..11e1ac94f 100644 --- a/plotUtils/simps/systematics/radiative_acceptance_systematic-misalignment.py +++ b/plotUtils/simps/systematics/radiative_acceptance_systematic-misalignment.py @@ -1,9 +1,10 @@ -#!/usr/bin/env python -# coding: utf-8 - -# In[27]: - - +#!/usr/bin/python3 +#================================================================================================================================== +# Description: Compares the radiative acceptance between nominal and misaligned HPS detector versions +# Author: Alic Spellman +# Date: 09/05/2024 +# Script to load MC samples, plot and compute misalignment systematics for 2016 simp L1L1 analysis +# Calculates both radiative trident acceptance and signal acceptance import os import awkward as ak import numpy as np @@ -12,190 +13,73 @@ import uproot import ROOT as r import copy - import matplotlib.pyplot as plt import matplotlib as mpl -import mplhep import matplotlib.gridspec as gridspec - import sys -sys.path.append('/sdf/group/hps/user-data/alspellm/2016/plotting') -import hps_plot_utils as utils - -get_ipython().run_line_magic('matplotlib', 'inline') -mpl.style.use(mplhep.style.ROOT) import math -import pickle -sys.path.append('/sdf/home/a/alspellm/src/hpstr_v62208/plotUtils/simps') +#simp tools defined in hpstr +hpstr_base = os.getenv('HPSTR_BASE') +sys.path.append(f'{hpstr_base}/plotUtils/simps') import simp_signal_2016 -from simp_theory_equations import SimpEquations as simpeqs -import copy -# Set global font sizes -plt.rcParams.update({'font.size': 40, # Font size for text - 'axes.titlesize': 40, # Font size for titles - 'axes.labelsize': 40, # Font size for axis labels - 'xtick.labelsize': 40, # Font size for x-axis tick labels - 'ytick.labelsize': 40, # Font size for 
y-axis tick labels - 'lines.linewidth':3.0, - 'legend.fontsize': 40}) # Font size for legend -plt.rcParams['font.family'] = 'DejaVu Sans' - -def cnv_tgraph_to_np(tgraph): - # Number of points in the TGraph - npoints = tgraph.GetN() - - # Retrieve X and Y values - xvals = np.array([tgraph.GetX()[i] for i in range(npoints)]) - yvals = np.array([tgraph.GetY()[i] for i in range(npoints)]) - - # Errors are not directly available in TGraph; setting them to zero - errors = np.zeros(npoints) - - # Handle fit function if it exists - x_fit = None - y_fit = None - if len(tgraph.GetListOfFunctions()) > 0: - fitfunc = tgraph.GetListOfFunctions()[0] - x_fit = np.linspace(fitfunc.GetXmin(), fitfunc.GetXmax(), 100) # 100 points for the fit - y_fit = np.array([fitfunc.Eval(x) for x in x_fit]) - - return (xvals, yvals, errors), (x_fit, y_fit) - -def cnv_root_to_np(histo): - nbins = histo.GetNbinsX() - xvals = np.array([histo.GetBinCenter(x+1) for x in range(nbins+1)]) - yvals = np.array([histo.GetBinContent(x+1) for x in range(nbins+1)]) - errors = np.array([histo.GetBinError(x+1) for x in range(nbins+1)]) - underflow = histo.GetBinContent(0) - overflow = histo.GetBinContent(nbins+1) - - #add over/underflow - xvals = np.insert(xvals, 0, xvals[0]-histo.GetBinWidth(1)) - yvals = np.insert(yvals, 0, underflow) - xvals = np.append(xvals, xvals[-1]+histo.GetBinWidth(1)) - yvals = np.append(yvals, overflow) - errors = np.insert(errors, 0, 0.0) - errors = np.append(errors, 0.0) - - #get fit function if it exist - x_fit = None - y_fit = None - if len(histo.GetListOfFunctions()) > 0: - fitfunc = histo.GetListOfFunctions()[0] - x_fit = np.linspace(fitfunc.GetXmin(), fitfunc.GetXmax(), int((fitfunc.GetXmax()-fitfunc.GetXmin())/histo.GetBinWidth(1))) - y_fit = np.array([fitfunc.Eval(x) for x in x_fit]) - - return (xvals, yvals, errors), (x_fit, y_fit) - -def fit_plot_with_poly(plot, tgraph=False, specify_n=None, set_xrange=False, xrange=(0.0, 1.0)): - polys = [] - chi2s = [] - fstats = [] - fit_resultults = [] - - if tgraph: - npoints = plot.GetN() - else: - npoints = 0 - nBins = plot.GetNbinsX() - for ibin in range(nBins): - if plot.GetBinContent(ibin) > 0: - npoints += 1 - pass - - - if not specify_n: - for n in range(12): - fitfunc = r.TF1(f'pol{n}',f'pol{n}') - fitfunc.SetLineColor(r.kRed) - if set_xrange: - fitfunc.SetRange(xrange[0], xrange[1]) - fit_result = plot.Fit(fitfunc,"RSQ") - else: - fit_result = plot.Fit(fitfunc,"SQ") - fitfunc.SetLineColor(r.kRed) - fitfunc.SetMarkerSize(0.0) - chi2s.append(fit_result.Chi2()) - polys.append(n) - fit_resultults.append(fit_result) - - #Perform fstat test to see how much fit improves with additional order (why does this work?) 
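# [Editor's note, not part of the original code] On the question "why does this work?" above: adding one polynomial
# order to a least-squares fit always lowers chi2, so the useful quantity is whether the *drop* in chi2 is large
# compared with the per-degree-of-freedom chi2 that remains in the larger model.  That is the F-statistic computed
# here,
#     F = (chi2_{n-1} - chi2_n) / ( chi2_n / (N - n - 1) ),
# where N is the number of fitted points and pol(n) carries n+1 parameters.  If the extra term is unnecessary, F
# follows an F(1, N-n-1) distribution, so the order is increased only while F stays large (equivalently, while its
# p-value stays small).  A hedged sketch of that p-value, assuming scipy is available:
#     from scipy import stats
#     p_value = stats.f.sf(fstat, 1, npoints - n - 1)   # small p-value => the extra order is justified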
- if n > 0: - fstats.append( (chi2s[n-1]-chi2s[n])*(npoints-n-1)/(chi2s[n])) - else: - fstats.append(0.0) - - print(fstats) - return None, None - else: - fitfunc = r.TF1(f'pol{specify_n}',f'pol{specify_n}') - fitfunc.SetLineColor(r.kRed) - fitfunc.SetLineWidth(5) - if set_xrange: - fitfunc.SetRange(xrange[0], xrange[1]) - fit_result = plot.Fit(fitfunc,"RSQ") - else: - fit_result = plot.Fit(fitfunc,"SQ") - #params = np.round(fit_result.Parameters(),4) - #errors = np.round(fit_result.Errors(),4) - params = fit_result.Parameters() - errors = fit_result.Errors() - #return fit_result - return params, errors - -def polynomial(*coefficients): - def _implementation(x): - return sum([ - coefficient * x**power - for power, coefficient in enumerate(coefficients) - ]) - return _implementation - - -# In[3]: - - -signalProcessor = simp_signal_2016.SignalProcessor(np.pi*4., 1.5) -#V0 Projection Significance Data vs MC efficiency +#====================================================================================================================================== +#INITIALIZATION +#======================================================================================================================================= +# Set plotting parameters for matplotlib +plt.rcParams.update({'font.size': 40, 'axes.titlesize': 40, 'axes.labelsize': 40, 'xtick.labelsize': 40, 'ytick.labelsize': 40, 'lines.linewidth': 3.0, 'legend.fontsize': 40}) +plt.rcParams['font.family'] = 'DejaVu Sans' + +#parse input arguments +import argparse +parser = argparse.ArgumentParser(description='') +parser.add_argument('--outdir', type=str, default='./search_results') +parser.add_argument('--mpifpi', type=float, default=4.*np.pi) + +args = parser.parse_args() +outdir = args.outdir + +#======================================================================================================================================= +# LOAD DATA: Initialize signal processor and load radiative trident MC samples +#======================================================================================================================================= +search_window = 1.5 +signalProcessor = simp_signal_2016.SignalProcessor(args.mpifpi, search_window) + +##Load MC samples for nominal and misaligned detectors samples = {} mcsamples = {} branches = ["unc_vtx_mass", "unc_vtx_psum"] -#LOAD NOMINAL -#rad +#Load reconstructed and selected radiative events for nominal detector infile = '/sdf/group/hps/user-data/alspellm/2016/systematics/radacc/misalignments/hadd_2kfiles_rad_nobeam_nominal_recon_ana.root' selection = 'vtxana_radMatchTight_nocuts' #USE RADMATCHTIGHT! samples['nominal'] = signalProcessor.load_data(infile, selection, cut_expression='((unc_vtx_psum > 1.9) & (unc_vtx_psum < 2.4) )', expressions=branches) -#mc ana + +#Load generated events (mc ana) for nominal detector infile = '/sdf/group/hps/user-data/alspellm/2016/systematics/radacc/misalignments/hadd_2kfiles_rad_nobeam_nominal_mc_ana.root' slicfile = r.TFile(infile, "READ") mcsamples['nominal'] = copy.deepcopy(slicfile.Get('mcAna/mcAna_mc622Mass_h')) slicfile.Close() -#LOAD MISALIGNED -#rad +#Load reconstructed and selected radiative events for misaligned detector infile = '/sdf/group/hps/user-data/alspellm/2016/systematics/radacc/misalignments/hadd_2kfiles_rad_nobeam_misalignments_1_recon_ana.root' selection = 'vtxana_radMatchTight_nocuts' #USE RADMATCHTIGHT! 
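# [Editor's note] For readers without the hpstr environment: SignalProcessor.load_data (documented later in this
# patch series) is essentially a thin uproot wrapper around the flat tuples written by the vertex ana processor,
# roughly equivalent to
#     with uproot.open(infile) as f:
#         events = f[f'{selection}/{selection}_tree'].arrays(cut=cut_expression, expressions=branches)
# plus any derived columns the full implementation adds.  The load_data calls in this block rely on that behaviour.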
samples['misaligned_v1'] = signalProcessor.load_data(infile, selection, cut_expression='((unc_vtx_psum > 1.9) & (unc_vtx_psum < 2.4) )', expressions=branches) -#mc ana + +#Load generated events (mc ana) for misaligned detector infile = '/sdf/group/hps/user-data/alspellm/2016/systematics/radacc/misalignments/hadd_2kfiles_rad_nobeam_misalignments_1_mc_ana.root' slicfile = r.TFile(infile, "READ") mcsamples['misaligned_v1'] = copy.deepcopy(slicfile.Get('mcAna/mcAna_mc622Mass_h')) slicfile.Close() +#output file to store systematic results +outfile = uproot.recreate(f'{outdir}/radacc_misalignment_systematic_results.root') -# In[4]: - - -outfile = uproot.recreate('radacc_misalignment_systematic_results.root') - - -# In[26]: - - -#plot radiative peak +#======================================================================================================================================= +# CHECK RADIATIVE PEAK: Ensure that the radiative peaks for misaligned and nominal are commensurate. +# Peak will be off if overly-misaligned +#======================================================================================================================================= psum_h = ( hist.Hist.new .StrCategory(list(samples.keys()), name='samples') @@ -206,24 +90,15 @@ def _implementation(x): fig, ax = plt.subplots(figsize=(25,15)) for i,(sname, sample) in enumerate(samples.items()): psum_h.fill(sname, sample.unc_vtx_psum) - #xvals = psum_h[sname,:].axes[0].centers - #yvals = psum_h[sname,:].values() - #plt.plot(xvals, yvals, color=colors[i],marker='o', markersize=20, mew=3, linestyle='',label=sname) psum_h.plot(color=['black','darkred'], linewidth=3.0) plt.legend() plt.ylabel('Events') -plt.savefig('radiative_peak_misaligned.png') - - -# In[ ]: - - - - - -# In[102]: - +plt.savefig(f'{outdir}/radiative_peak_misaligned.png') +#======================================================================================================================================= +# RADIATIVE ACCEPTANCE HISTOGRAMS: Initialize invariant mass histograms, rebin, and convert them to ROOT histograms +#======================================================================================================================================= +#invariant mass histogram binning must match mc ana invariant mass to take ratio nbinsx = mcsamples['nominal'].GetNbinsX() first_bin = mcsamples['nominal'].GetBinLowEdge(1) last_bin = nbinsx*mcsamples['nominal'].GetBinWidth(1) @@ -234,7 +109,7 @@ def _implementation(x): .Double() ) -#Fill without weights, so that histos can be converted to ROOT and retain statistical uncertainty +#Fill mc components without weights and convert to ROOT, rebin invmass_histos = {} for sname, sample in samples.items(): invmass_h.fill(sname, sample.unc_vtx_mass*1000.) 
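# ---------------------------------------------------------------------------------------------------------------------
# [Editor's note] The step that turns these hist.Hist mass spectra into the ROOT histograms rebinned in the next hunk
# sits in unchanged context outside this diff.  For reference, the same uproot round-trip used later in this patch
# series (write the hist object with uproot, read it back with PyROOT) can be sketched as follows; the scratch file
# name is a placeholder.
def hist_to_root(hist_obj, name, scratch_file='tmp_cnv_histos.root'):
    """Convert a hist.Hist into a ROOT histogram by writing it with uproot and reading it back with PyROOT."""
    with uproot.recreate(scratch_file) as tmp:
        tmp[name] = hist_obj
    tfile = r.TFile(scratch_file, 'READ')
    rooth = copy.deepcopy(tfile.Get(name))
    tfile.Close()
    return rooth
# e.g. (hypothetical usage): invmass_histos[sname] = hist_to_root(invmass_h[sname, :], f'recon_{sname}')
# ---------------------------------------------------------------------------------------------------------------------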
@@ -242,10 +117,6 @@ def _implementation(x): invmass_histos[sname].Rebin(2) mcsamples[sname].Rebin(2) - -# In[103]: - - def nonUniBinning(histo, start, size): edges_a = np.arange(histo.GetBinLowEdge(1),start+histo.GetBinWidth(1),histo.GetBinWidth(1)) edges_b = np.arange(start,histo.GetBinLowEdge(histo.GetNbinsX()), size) @@ -259,47 +130,71 @@ def nonUniBinning(histo, start, size): histo_rebinned.SetBinContent(new_bin, histo_rebinned.GetBinContent(new_bin)+content) histo_rebinned.SetBinError(new_bin, np.sqrt(histo_rebinned.GetBinError(new_bin)**2 + error**2)) return histo_rebinned -#nonUniBinning(invmass_histos['nominal'], 150, 5) + +#enable non-uniform binning for sname, sample in samples.items(): invmass_histos[sname] = nonUniBinning(invmass_histos[sname], 150, 4) mcsamples[sname] = nonUniBinning(mcsamples[sname], 150, 4) outfile[f'recon_{sname}'] = invmass_histos[sname] outfile[f'mc_{sname}'] = mcsamples[sname] +#======================================================================================================================================= +# RADIATIVE ACCEPTANCE: Compute and plot the radiative acceptance for nominal and misaligned +#======================================================================================================================================= -# In[110]: - - -#calculate radiative acceptance fits = {} colors = ['#d62728', '#bcbd22', '#2ca02c', '#17becf', '#1f77b4', '#9467bd', '#7f7f7f'] colors = ['black', 'darkred', 'darkblue', 'darkgreen', 'darkorange'] + +#Figure to plot radiative acceptance and systematic uncertainty fig, ax = plt.subplots(2,1,figsize=(20,20)) plt.subplot(2,1,1) plt.xlabel('Invariant Mass [MeV]') plt.ylabel('Radiative Acceptance') plt.ylim(0.0, .15) plt.xlim(20.0,206.0) + +#Calculate radiative acceptance as ratio of recon+sel/generated for each detector +#these are root histograms. for i,(sname, histo) in enumerate(invmass_histos.items()): + + #divide recon+sel by generated ratio = invmass_histos[sname].Clone() ratio.Divide(mcsamples[sname]) - fitparams, _ = fit_plot_with_poly(ratio, specify_n=7, set_xrange=True, xrange=(30.0, 220.0)) + + #fit radiative acceptance + fitparams, _ = signalProcessor.fit_plot_with_poly(ratio, specify_n=7, set_xrange=True, xrange=(30.0, 220.0)) outfile[f'rad_acc_{sname}'] = ratio print(sname,fitparams) - #fit_plot_with_poly(ratio, set_xrange=True, xrange=(30.0, 220.0)) + + #convert root histograms to numpy data for convenient plotting using mpl (xvals, yvals, errors), (x_fit, y_fit) = cnv_root_to_np(ratio) fits[sname] = (x_fit, y_fit) + + #plot plt.errorbar(xvals, yvals, yerr=errors, linestyle='', marker='o', color=colors[i], label=sname) plt.plot(x_fit, y_fit, linewidth=3.0, color=colors[i]) + plt.legend(fontsize=20) #plot the real radiative acceptance (includes beam) #radacc_off = polynomial(-0.48922505, 0.073733061, -0.0043873158, 0.00013455495, -2.3630535e-06, 2.5402516e-08, -1.7090900e-10, 7.0355585e-13, -1.6215982e-15, 1.6032317e-18) #plt.plot(xvals, radacc_off(xvals), label='rad+beam', marker='o', color='blue') +#======================================================================================================================================= +# CALCULATE SYSTEMATIC UNCERTAINTY: Using the ratio of the nominal and misaligned radiative acceptance functions +# If the radiative acceptance increases with misalignment, that represents a decrease in expected signal (no systematic) +# If the radiative acceptance decreases with misalignment, that will boost expected signal and must be accounted for. 
+#======================================================================================================================================= +#this is a subfigure of the figure above plt.subplot(2,1,2) + +#calculate ratio of the two radiative acceptance fits +#if ratio < 1.0, apply systematic to expected signal fit_ratio = fits['misaligned_v1'][1]/fits['nominal'][1] xvalues = fits['nominal'][0] + +#plot the ratio plt.plot(xvalues, fit_ratio, color='black', marker = '+', mew=5) plt.axhline(y=1.0, linestyle='--', color='black') plt.axhline(y=0.8, linestyle='--', color='black') @@ -307,73 +202,62 @@ def nonUniBinning(histo, start, size): plt.ylim(0.6,1.1) plt.xlabel('A\' Invariant Mass [MeV]') plt.ylabel('Systematic Uncertainty') - -plt.savefig('radiative_acceptance_misalignment.png') - - -# In[105]: +plt.savefig(f'{outdir}/radiative_acceptance_misalignment.png') +#fit the systematic uncertainty results and save the fit sys_gr = r.TGraph(len(xvalues), xvalues, fit_ratio) -print(xvalues) -params_sys, errors_sys = fit_plot_with_poly(sys_gr, tgraph=True, specify_n = 9, set_xrange=True, xrange=(50.0, 220.0)) -print(params_sys) -(xvals, yvals, errors), (x_fit, y_fit) = cnv_tgraph_to_np(sys_gr) +params_sys, errors_sys = signalProcessor.fit_plot_with_poly(sys_gr, tgraph=True, specify_n = 9, set_xrange=True, xrange=(50.0, 220.0)) +(xvals, yvals, errors), (x_fit, y_fit) = signalProcessor.cnv_tgraph_to_np(sys_gr) fig, ax = plt.subplots(figsize=(20,10)) plt.plot(xvals, yvals, marker='+', mew=3, markersize=10, color='darkblue') plt.plot(x_fit, y_fit, linewidth=3.0, color='red') -test = polynomial(-8.7913353, 0.61710096, -0.014554635, 0.00011685881, 1.3328346e-06, -4.2065138e-08, 4.6959958e-10, -2.9405730e-12, 1.0885979e-14, -2.2317805e-17, 1.9584455e-20) outfile['misalignment_systematic'] = sys_gr - -# In[29]: - - -#Signal misalignment using the nominal no-beam radiative acceptance +#======================================================================================================================================= +# SIGNAL MISALIGNMENT: calculate using the radiative acceptance fits from above. +# 1. Use nominal signal (NO BEAM) and the nominal radiative acceptance (NO BEAM) to calculate expected signal. +# 2. Use misaligned signal (NO BEAM) and the misaligned radiative acceptance (NO BEAM) to calculate expected signal. +# 3. Calculate ratio between nominal and misaligned expected signal rates. +# *I've already done steps 1 and 2 externally, and am loading the results below. 
+#=======================================================================================================================================
+#Load expected signal using nominal detector and nominal radiative acceptance (NO BEAM)
 infile = '/sdf/group/hps/user-data/alspellm/2016/systematics/simp_radacc_misalignment_v1/simp_systematic_nominal.root'
 with uproot.open(infile) as f:
     nominal_h = f['expected_signal_ap_h'].to_hist()
 
+#Load expected signal using misaligned detector and misaligned radiative acceptance (NO BEAM)
 infile = '/sdf/group/hps/user-data/alspellm/2016/systematics/simp_radacc_misalignment_v1/simp_systematic_misaligned.root'
 with uproot.open(infile) as f:
     misaligned_h = f['expected_signal_ap_h'].to_hist()
-    ratio_h = f['expected_signal_ap_h'].to_hist().reset()
+    exp_sig_ratio_h = f['expected_signal_ap_h'].to_hist().reset() #make copy to use as ratio plot
 
 outfile['expected_signal_nominal'] = nominal_h
 outfile['expected_signal_misaligned'] = misaligned_h
 
-nominal_h.plot()
-plt.show()
-misaligned_h.plot()
-plt.show()
-
-#take ratio of densities, misaligned to nominal
-ratio = misaligned_h.values()/nominal_h.values()
-# Find where nominal_h values are less than 1.0
-mask = nominal_h.values() < 1.0
-# Set corresponding ratio values to 0
-ratio[mask] = 0
-
-xbins = ratio_h.axes[0].centers
-ybins = ratio_h.axes[1].centers
+# Calculate expected signal ratio between nominal and misaligned
+ratio = nominal_h.values()/misaligned_h.values()
+xbins = exp_sig_ratio_h.axes[0].centers
+ybins = exp_sig_ratio_h.axes[1].centers
 xgrid, ygrid = np.meshgrid(xbins, ybins, indexing='ij')
-ratio_h.reset()
-ratio_h.fill(xgrid.flatten(), ygrid.flatten(), weight=ratio.flatten())
-outfile['signal_systematic_ratio'] = ratio_h
+exp_sig_ratio_h.fill(xgrid.flatten(), ygrid.flatten(), weight=ratio.flatten())
+outfile['signal_systematic_ratio'] = exp_sig_ratio_h
 
+#Plot ratio in 2d
 fig, ax = plt.subplots(figsize=(25,15))
-ratio_h.plot(cmin=np.min(ratio.flatten()[ratio.flatten()>0.0]), cmax=np.max(ratio.flatten()))
-plt.savefig('simp_radacc_misaligned_v1.png')
+exp_sig_ratio_h.plot(cmin=np.min(ratio.flatten()[ratio.flatten()>0.0]), cmax=np.max(ratio.flatten()))
+plt.savefig(f'{outdir}/simp_radacc_misaligned_v1.png')
+
+#=======================================================================================================================================
+# CALCULATE SYSTEMATIC UNCERTAINTY: Using the 2d expected signal ratio of nominal/misaligned, decide how to extract the systematic.
+#=======================================================================================================================================
 
-# In[66]:
-
-
-#Combine radiative acceptance and signal systematics
-#For each A' invariant mass, take the largest value, since this is in the numerator and reduces the expected signal rate
+# For each A' invariant mass, take the largest value, since this is in the numerator and reduces the expected signal rate.
+# The systematic uncertainty on the signal acceptance resulting from detector misalignment is calculated as a function of A' mass.
+# For each mass, take the maximum ratio across the relevant range of epsilon^2 to be the uncertainty.
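# ----------------------------------------------------------------------------------------------------------------------
# [Editor's note] The loop below implements the prescription above bin by bin.  For reference only, an equivalent
# vectorized form (an editor's sketch, not part of the original code) would be:
#     per_mass_max = np.max(exp_sig_ratio_h.values(), axis=1)      # max over the epsilon^2 axis for each mass bin
#     keep         = per_mass_max > 0.0                            # drop masses with no filled bins
#     sigsys_x     = list(exp_sig_ratio_h.axes[0].centers[keep])
#     sigsys_y     = list(per_mass_max[keep])
# ----------------------------------------------------------------------------------------------------------------------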
sigsys_y = [] sigsys_x = [] -for xbin,mass in enumerate(ratio_h.axes[0].centers): - sigsys = np.max(ratio_h.values()[xbin]) +for xbin,mass in enumerate(exp_sig_ratio_h.axes[0].centers): + sigsys = np.max(exp_sig_ratio_h.values()[xbin]) if sigsys == 0.0: continue sigsys_x.append(mass) @@ -381,19 +265,14 @@ def nonUniBinning(histo, start, size): sigsys_gr = r.TGraph(len(sigsys_x), np.array(sigsys_x), np.array(sigsys_y)) params_sigsys, errors_sigsys = fit_plot_with_poly(sigsys_gr, tgraph=True, specify_n = 5, set_xrange=True, xrange=(sigsys_x[0], sigsys_x[-1])) -#params_sigsys, errors_sigsys = fit_plot_with_poly(sigsys_gr, tgraph=True, set_xrange=True, xrange=(sigsys_x[0], sigsys_x[-1])) print(params_sigsys) (sigsys_x, sigsys_y, sigsys_errors), (sigsys_xfit, sigsys_yfit) = cnv_tgraph_to_np(sigsys_gr) plt.plot(sigsys_x, sigsys_y) plt.plot(sigsys_xfit, sigsys_yfit) -#Looks like we should just choose the overall maximum... sigsys_final = np.max(sigsys_yfit) print(f'Signal misalignment acceptance systematic: {sigsys_final}') -# In[93]: - - #Combine the signal and radiative acceptance systematics radsys_fitpoly = polynomial(-10.307720, 0.97578691, -0.036585723, 0.00077903787, -1.0393704e-05, 9.0187487e-08, -5.0948313e-10, 1.8078746e-12, -3.6566050e-15, 3.2111742e-18) masses = np.array([float(x) for x in range(60,230,1)]) @@ -402,9 +281,3 @@ def nonUniBinning(histo, start, size): fig, ax = plt.subplots(figsize=(25,15)) plt.plot(masses, misalignmentsys, marker='+', markersize=10, mew=3, color='black') - -# In[ ]: - - - - From ee109f784c65ccd946820080dcc4d396922abd4f Mon Sep 17 00:00:00 2001 From: Alic Shen Spellman Date: Fri, 6 Sep 2024 13:59:32 -0700 Subject: [PATCH 23/27] still documenting --- plotUtils/simps/plot_final_results.py | 9 +- plotUtils/simps/run_signal_search.py | 222 +++++--- plotUtils/simps/simp_signal_2016.py | 503 ++++++++++++++++-- ..._acceptance_target_position_systematic.py} | 185 +------ .../simps/systematics/v0projsig_systematic.py | 78 ++- 5 files changed, 697 insertions(+), 300 deletions(-) rename plotUtils/simps/systematics/{radiative_acceptance_systematic.py => radiative_acceptance_target_position_systematic.py} (53%) diff --git a/plotUtils/simps/plot_final_results.py b/plotUtils/simps/plot_final_results.py index f30193ab3..cd0772ba3 100644 --- a/plotUtils/simps/plot_final_results.py +++ b/plotUtils/simps/plot_final_results.py @@ -1,4 +1,4 @@ -#!/usr/bin/env python +#!/usr/bin/python3 import os import awkward as ak import numpy as np @@ -34,12 +34,11 @@ parser.add_argument('--mpifpi', type=float, default=4.*np.pi) args = parser.parse_args() +outdir = args.outdir ######################################################################################################################################## -#Input file should be output of run_signal_search.py -infile_signal_search = args.infile_signal_search #Grab search results (exp bkg, obs, local pvalues) -with uproot.open(infile) as f: +with uproot.open(args.infile_signal_search) as f: expected_bkg = f['expected_background'].values() expected_bkg_errlow = f['expected_background'].errors('low')[1] expected_bkg_errhigh = f['expected_background'].errors('high')[1] @@ -50,7 +49,7 @@ #Calculate the "Look-Elsewhere Effect" correction. 
Total search window mass range divided by avg mass resolution search_window = 1.5 #used in final analysis -signalProcessor = simp_signal_2016.SignalProcessor(args.mpifpi., search_window) +signalProcessor = simp_signal_2016.SignalProcessor(args.mpifpi, search_window) masses = expected_bkg[0] masses = masses[np.where(masses <= 124.0)[0]] #I restricted search for signal to 124 MeV avg_resolution = np.average( np.array([signalProcessor.mass_resolution(x) for x in masses]) ) diff --git a/plotUtils/simps/run_signal_search.py b/plotUtils/simps/run_signal_search.py index 2c04b6666..1558ef88d 100644 --- a/plotUtils/simps/run_signal_search.py +++ b/plotUtils/simps/run_signal_search.py @@ -10,8 +10,14 @@ import simp_signal_2016 from simp_theory_equations import SimpEquations as simpeqs import copy +#======================================================================================================================================= +# Initialize +#======================================================================================================================================= +# --outfilename: Specify output file name. +# --trials: Number of toy mc trials used to calculate data significance (typically millions+). +# --highPsum: If set, use high psum selection (control region). +# --tenpct: If True search for signal in 10% data. If False search for signal in 100% data. -############################################################################################# import argparse parser = argparse.ArgumentParser(description='Process some inputs.') parser.add_argument('--outfilename', type=str, default='expected_background_test') @@ -25,19 +31,23 @@ nsigma=1.5 #+-1.5*mass_res (3sigma wide in total) left_nsigma=nsigma+4.0 #mass sideband edge (sideband is 4sigma wide) right_nsigma=nsigma+4.0 -z0_floor_threshold=1000 #Defines the sideband size in min_z0. 1000 is arbitrary choice, that performs well (no signal contam) +z0_floor_threshold=1000 #Defines the sideband size in min_z0. 1000 is arbitrary choice that performs well (no signal contam) print(f'Search Window Size: +-', nsigma) +#======================================================================================================================================= +# Read data +#======================================================================================================================================= #Import all definitions from simp_signal_2016, so that these results are consistent with the expected signal calculations signalProcessor = simp_signal_2016.SignalProcessor(mpifpi=4.*np.pi, nsigma=1.5) data = ak.Array([]) + if args.tenpct: + # Search for signal in 10% data + print('Loading 10% Data') outfilename = f'{outfilename}_10pct' - #Load 10% data signal region inv_mass_range = (26,200) - print('Loading 10% Data') branches = ["unc_vtx_mass","unc_vtx_psum", "unc_vtx_ele_track_z0", "unc_vtx_pos_track_z0", "unc_vtx_z", "unc_vtx_proj_sig"] infile = '/sdf/group/hps/user-data/alspellm/2016/data/full_hadd_blpass4c_ana.root' selection = 'vtxana_Tight_2016_simp_reach_SR' @@ -45,39 +55,55 @@ data['weight'] = 1.0 else: - outfilename = f'{outfilename}_100pct' - #Load 100% data + # Search for signal in 100% data print('Loading 100% Data') + outfilename = f'{outfilename}_100pct' inv_mass_range = (30,124) branches = ["unc_vtx_mass","unc_vtx_psum", "unc_vtx_ele_track_z0", "unc_vtx_pos_track_z0", "unc_vtx_z", "unc_vtx_proj_sig"] indir = '/fs/ddn/sdf/group/hps/users/alspellm/data_storage/pass4kf/pass4kf_ana_20240513' - mass_safety = 'unc_vtx_mass*1000. 
>= 0' + mass_safety = 'unc_vtx_mass*1000. >= 0' #choose to blind certain masses if necessary + #If high psum, can look at all masses if args.highPsum: selection = 'vtxana_Tight_2016_simp_reach_CR' else: selection = 'vtxana_Tight_2016_simp_reach_SR' - #inv_mass_range = (135,200) - #mass_safety = 'unc_vtx_mass*1000. > 135' #CANT LOOK BELOW THIS MASS UNTIL UNBLINDING! + #Loop over all 100% data vertex ana tuples for filename in sorted(os.listdir(indir)): if not filename.endswith('.root'): continue run = filename.split('_')[4] print('Loading Run ', run) infile = os.path.join(indir,filename) + + #build awkward array for 100% data data = ak.concatenate([data, signalProcessor.load_data(infile, selection, cut_expression=mass_safety, expressions=branches)]) data['weight'] = 1.0 ####################################################################################################################################### +#======================================================================================================================================= +# Useful Functions +#======================================================================================================================================= poisson_low_err = lambda n : np.sqrt(n - 0.25) if n >= 0.25 else 0.0 poisson_up_err = lambda n : np.sqrt(n+0.75) + 1 def get_abcd_error(A, B, C, D, E): + """ + Calculate the estimated background error bars using the ABCD method. + + Args: + A,B,C,D,E (int): Number of events in each region. + Returns: + sigma_F_up, sigma_F_low (float, float): Error bars on the estimated background in region F (signal region). + """ + + #Calculate poisson errors on regions A and E combined. sigma_AE_up = poisson_up_err(A+E) sigma_AE_low = poisson_low_err(A+E) + #Calculate Gaussian errors on regions B+D and region C. sigma_BD = math.sqrt(B+D) sigma_C = math.sqrt(C) @@ -101,31 +127,62 @@ def get_abcd_error(A, B, C, D, E): return sigma_F_up, sigma_F_low def calc_exp_bkg(A, B, C, D, E): - #If A+E are 0, set A+E = value, based on nsigma errors. - #A poisson with mean=0.4 will throw 0 64% of the time + """ + Calculates the expected background using the counts in regions A, B, C, D, and E. + + Args: + A, B, C, D, E (int): Number of events in each region. + + Returns: + exp_bkg (float): Expected background using ABCD estimation. + counts (list[int]): List of integers corresponding to counts in each region. + + Notes: + If the counts in A+E < 1, set A+E = 0.4. + 0.4 is the Poisson mean where 0 is observed ~68% of the time. + """ + # Set the minimum count rate in A+E if A+E < 0.4: A = 0.4 E = 0.0 + + #Expected background ABCD calculation exp_bkg = C*((A+E)/(B+D)) counts = [A, B, C, D, E] return exp_bkg, counts def run_abcd_method(data, signal_mass): + """ + Calculate the expected background, with errors, in the invariant mass window signal region by defining the regions ABCDEF. + Also return the number of events observed in the signal region. + + Args: + data (awkward array): Input data array. + signal_mass (float): Center of the invariant mass search window in dark vector meson mass. + + Returns: + expected_bkg (float): Expected background in the search window signal region estimated using ABCD method. + nobs (float): Number of events observed in the search window signal region. + bkg_error ([float,float]): [upper,lower] error bars on the expected background. + counts (list[float]): Number of events in each region except search window signal region [A, B, C, D, E]. 
+ """ - #Get the search window + # Define the boundaries of the invariant mass search window mass_low = signal_mass - signalProcessor.mass_resolution(signal_mass)*nsigma mass_high = signal_mass + signalProcessor.mass_resolution(signal_mass)*nsigma + # Define the upper and lower boundaries of the mass sidebands. left_xlow = signal_mass - signalProcessor.mass_resolution(signal_mass)*left_nsigma right_xhigh = signal_mass + signalProcessor.mass_resolution(signal_mass)*right_nsigma print(f'Left:{left_xlow}-{mass_low} | Search:{mass_low}-{mass_high} | Right:{mass_high}-{right_xhigh}') - #Signal mass window + # Apply the invariant mass search window mass selection on the data, as well as the sidebands. mass_sel = {} mass_sel[f'search_window'] = (data.unc_vtx_mass * 1000. <= mass_high) & (data.unc_vtx_mass * 1000. >= mass_low) mass_sel[f'left_sideband'] = (data.unc_vtx_mass * 1000. <= mass_low) & (data.unc_vtx_mass * 1000. >= left_xlow) mass_sel[f'right_sideband'] = (data.unc_vtx_mass * 1000. <= right_xhigh) & (data.unc_vtx_mass * 1000. >= mass_high) - #Fill z0 distributions for each region + # ABCD method extends the sidebands and search window into the minimum z0 (aka y0) parameter distribution. + # Define and fill the z0 (aka y0) distributions for each region. min_z0_h = ( hist.Hist.new .StrCategory(list(mass_sel.keys()), name='mass selection') @@ -135,12 +192,18 @@ def run_abcd_method(data, signal_mass): min_z0_h.fill(f'search_window', data[mass_sel[f'search_window']].unc_vtx_min_z0,weight=data[mass_sel[f'search_window']].weight ) min_z0_h.fill(f'left_sideband', data[mass_sel[f'left_sideband']].unc_vtx_min_z0, weight=data[mass_sel[f'left_sideband']].weight) min_z0_h.fill(f'right_sideband', data[mass_sel[f'right_sideband']].unc_vtx_min_z0, weight=data[mass_sel[f'right_sideband']].weight) + # Regions A E and F are defined based on the value of the minimum z0 cut in region F minz0_coeffs = signalProcessor.get_minz0_cut() min_z0_cut = signalProcessor.polynomial(minz0_coeffs[0],minz0_coeffs[1],minz0_coeffs[2])(signal_mass) - print('min_z0_cut: ', min_z0_cut) + + #Determine the min z0 cut floor. The ratio of potential signal to background in region C should be so small #as to be negligible, or else the expected background in region F will be overestimated due to signal contamination in C, #and our ability to make a discovery will be dramatically reduced. + + # Define the minimum z0 floor used to count events in regions B, C, and D. + # This floor is defined so that the background estimate is weighted towards the tails of the minimum z0 distributions rather + # than the core of the distribution. However, the tails cannot be so small that signal contamination is an issue in C. 
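    # [Editor's note, added for readability] Region map implied by the counting below (invariant mass on the
    # horizontal axis, minimum track z0 on the vertical axis):
    #
    #                              left sideband | search window | right sideband
    #     z0 > min_z0 cut                A       |  F (signal)   |       E
    #     z0_floor .. min_z0 cut         B       |       C       |       D
    #
    # The signal region F is never counted here; its background is estimated as F ~ C * (A + E) / (B + D),
    # which is what calc_exp_bkg() evaluates, and nobs is counted in F further below.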
xwidth = min_z0_h[f'search_window',:].axes[0].widths[0] xmax = min_z0_cut - 2*xwidth threshold = z0_floor_threshold @@ -152,19 +215,20 @@ def run_abcd_method(data, signal_mass): break z0_floor = round(xmax,2) + # Count the events in each region B = min_z0_h[f'left_sideband',:][hist.loc(z0_floor):hist.loc(min_z0_cut):sum] A = min_z0_h[f'left_sideband',:][hist.loc(min_z0_cut)+1::sum] D = min_z0_h[f'right_sideband',:][hist.loc(z0_floor):hist.loc(min_z0_cut):sum] E = min_z0_h[f'right_sideband',:][hist.loc(min_z0_cut)+1::sum] C = min_z0_h[f'search_window',:][hist.loc(z0_floor):hist.loc(min_z0_cut):sum] counts = [A, B, C, D, E] + + #Calculate the error on the background estimate according to the counts bkg_error = get_abcd_error(A, B, C, D, E) + #Calculate the expected background expected_bkg, counts = calc_exp_bkg(A, B, C, D, E) - #Get number of observed events - #tight_sel = signalProcessor.tight_selection(data, signal_mass, case=1) - - print('final sel') + # Determine the observed number of events after applying the last selection criteria, mass and minimum z0. minz0_sel = signalProcessor.minz0_sel(data) masswindow_sel = signalProcessor.mass_sel(data, signal_mass) final_sel = np.logical_and.reduce([masswindow_sel, minz0_sel]) @@ -173,48 +237,67 @@ def run_abcd_method(data, signal_mass): min_z0_h.reset() return expected_bkg, nobs, bkg_error, counts -####################################### P-Value Functions ########################################################################### def get_t0(A, B, C, D, E, ntrials=100000): - - # Include background estimate statistical uncertainty in p-value calculation - # Sample three parent distributions - # Gaussian for (B+D) - # Gaussian for C - # Poisson for (A+E) - # Calculate expected background F. This is the mean of the expected background parent distribution, Poisson with mean F - # Sample the F parent distribution to measure the test statistic t0 - # Build test statistic distribution - + """ + Constructs the background-only test-statistic distribution used to calculate the significance of the data in each search window. + This test-statistic distribution has the statistical uncertainty on the background baked into it. + + Args: + A, B, C, D, E (int): Counts in each region. + ntrials (int, optional): Specify the number of toy MC trials used to construct the test-stat distribution. + This should be large enough to achieve a statistically significant right tail. + + Returns: + t0_distribution (hist): Hist histogram of the background-only test-statistic distribution. + Performing a one-sided tail integral of this histogram starting from the number of observed events + gives the local p-value of the data in the search window signal region. 
+ """ + + # Define the background-only test-statistic distribution t0_distribution = ( hist.Hist.new .Reg(500, 0.0, 500.0, label='Expected Background Toy MC Trials') .Double() ) - # Vectorized sampling + # Sample the parent distributions of each region used to estimate the background rate A_E_s = np.random.poisson(lam=(A+E), size=ntrials) B_D_s = np.random.normal(loc=(B+D), scale=np.sqrt(B+D), size=ntrials) C_s = np.random.normal(loc=C, scale=np.sqrt(C), size=ntrials) - # Calculate F and t0 for all trials - F = (A_E_s / B_D_s) * C_s - t0 = np.random.poisson(lam=F) + # Calculate the expected background (b) and t0 for all trials + b = (A_E_s / B_D_s) * C_s + + #Use the expected background b as the true mean of the background parent distribution (which is an approximation) + t0 = np.random.poisson(lam=b) - # Fill histogram + # Fill background-only test-statistic distribution t0_distribution.fill(t0) return t0_distribution def get_pvalue(test_stat_h, nobs): + """ + Calculate the local p-value of the data in the search window signal region. + + Args: + test_stat_h (hist histogram): Background-only test-statistic distribution returned by get_t0(). + nobs (float0: Number of events observed in the search window signal region. - #Get the number of events >= nobs + Returns: + mean (float): Local p-value mean. + low_err (float): Lower error bar on local p-value mean. + up_err (float): Upper error bar on local p-value mean. + """ + + # Count the number of events in the background-only test-statistic distribution greater than the number of events observed. + # This will be normalized to the total distribution. try: nover = test_stat_h[hist.loc(nobs)::sum] except: nover = 0.0 - print('LOOK. NOVER = ', nover) - #make a numerator and denominator histogram, convert to ROOT to get correct Poisson errors, then divide into TGraphAsymm + # Define hist histograms to store the total number of events in the background-only test-stat distribution, and nover. numer_h = ( hist.Hist.new .Reg(1, 0.0, 1.1, label='Events past nobs') @@ -230,7 +313,9 @@ def get_pvalue(test_stat_h, nobs): denom_h.fill(np.ones(int(test_stat_h[::sum]))) test_stat_h.reset() - #convert to ROOT to get exact poisson errors + # Convert the hist histograms to ROOT histograms so that they can be divided and have the correct error bars. + # Seems messy, but filling the hist histograms and then converting to ROOT is *way* faster when you have 100 million events. + # The reason we use histograms is that we get the Clopper Pearson errors when we divide two ROOT histograms. 
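    # [Editor's note] Cross-check sketch, not part of the original code: the same Clopper-Pearson interval could be
    # computed directly from the two counts without the ROOT round-trip, e.g. with scipy:
    #     from scipy import stats
    #     k, n = int(nover), int(denom_h[::sum])                        # toys past nobs / total toys
    #     lo   = stats.beta.ppf(0.5 * (1 - 0.6827), k, n - k + 1) if k > 0 else 0.0
    #     hi   = stats.beta.ppf(1 - 0.5 * (1 - 0.6827), k + 1, n - k) if k < n else 1.0
    # The histogram-division route below is kept as written since it matches how the rest of the analysis is wired.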
histos = [numer_h, denom_h] uproot_file = uproot.recreate(f'tmp_cnv_histos.root') for i, histo in enumerate(histos): @@ -241,12 +326,13 @@ def get_pvalue(test_stat_h, nobs): histos[i] = copy.deepcopy(infile.Get(f'histo_{i}')) infile.Close() - #Divide number past versus number generated to get clopper pearson errors + # The local p-value of the search window signal region is calculated by taking the ratio of nover to the total number of toys histos[0].SetBinErrorOption(1) histos[1].SetBinErrorOption(1) result_g = r.TGraphAsymmErrors() - result_g.Divide(histos[0], histos[1], opt="cp") + result_g.Divide(histos[0], histos[1], opt="cp") # Specify clopper pearson errors for correct error bars + # Reset the histograms to clear mem numer_h.reset() denom_h.reset() @@ -256,10 +342,18 @@ def get_pvalue(test_stat_h, nobs): return mean, low_err, up_err -#################################################################################################################################### -#Loop over mass and signal +#======================================================================================================================================= +# SEARCH FOR SIGNAL: Calculates the local p-value of each invariant mass search window signal region. +#======================================================================================================================================= + +#======================================================================================================================================= +# INITIALIZE +#======================================================================================================================================= + +# Define the invariant mass range to perform the search for signal inv_masses = np.array([x for x in range(inv_mass_range[0], inv_mass_range[-1])]) +# Save the results to these arrays exp_bkg_mev=[] nobs_mev=[] bkg_uperror_mev = [] @@ -269,7 +363,7 @@ def get_pvalue(test_stat_h, nobs): pvalue_uperr_mev = [] pvalue_lowerr_mev = [] -#Set signal/control region +# Select the data set (10% or 100%) and the Psum region to search for signal if not args.tenpct and args.highPsum: psum_sel = signalProcessor.psum_sel(data, case='cr') elif args.tenpct and not args.highPsum: @@ -279,20 +373,22 @@ def get_pvalue(test_stat_h, nobs): else: psum_sel = signalProcessor.psum_sel(data, case='sr') -#get the Tight selection, without mass and minz0 cuts -#init_sel = signalProcessor.tight_selection(data, 0.0, case=3) -zcut_sel = signalProcessor.zcut_sel(data) -vprojsig_sel = signalProcessor.vprojsig_sel(data) -sameside_sel = signalProcessor.sameside_z0_cut(data) -initial_sel = np.logical_and.reduce([zcut_sel, vprojsig_sel, psum_sel, sameside_sel]) -print(initial_sel) -print(np.max(data[initial_sel].unc_vtx_min_z0)) +# Initialize subset of the pre-defined selections. These do not yet include mass selection or minimum z0 (aka y0) cut. +# Those cuts can't be applied before the background estimation is completed. +zcut_sel = signalProcessor.zcut_sel(data) # Cut on reconstructed vertex z < -4.3 mm +vprojsig_sel = signalProcessor.vprojsig_sel(data) # Cut events with target projected vertex significance > 2.0 +sameside_sel = signalProcessor.sameside_z0_cut(data) # Cut events where both tracks have same-side z0 (degenerate cut...) 
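# [Editor's note] Worked example of the window sizes the loop below will use, evaluated with the mass-resolution
# polynomial defined in simp_signal_2016.py (coefficients 0.757, 0.0316, 5.29e-05); the numbers are illustrative:
#     sigma_m(60 MeV) ~ 0.757 + 0.0316*60 + 5.29e-05*60**2 ~ 2.85 MeV
#     search window   = 60 +/- 1.5*sigma_m ~ [55.7, 64.3] MeV
#     mass sidebands  extend a further 4*sigma_m (~11.4 MeV) beyond the window on each side, as implemented in
#     run_abcd_method() above.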
+initial_sel = np.logical_and.reduce([zcut_sel, vprojsig_sel, psum_sel, sameside_sel]) # Combine initial selections + +#======================================================================================================================================= +# RUN SEARCH FOR SIGNAL +#======================================================================================================================================= -#Loop over invariant mass range +# Loop over invariant mass range for m,mass in enumerate(inv_masses): print(f'Running Signal Mass Window Center {mass}') - sel = zcut_sel & vprojsig_sel & psum_sel - print('sel') + + # Estimate the background in the search window signal region using ABCD method exp_bkg, nobs, bkg_error, counts = run_abcd_method(data[initial_sel], mass) bkg_lowerror_mev.append(bkg_error[1]) bkg_uperror_mev.append(bkg_error[0]) @@ -302,14 +398,16 @@ def get_pvalue(test_stat_h, nobs): print(f'background estimate: {exp_bkg} | nobs: {nobs} | counts: {counts} | bkg error: {bkg_error}') #Calculate the p-value by building the test statistic distribution t0 + # Calculate the significance of the data (local p-value) by constructing the background-only test-statistic distribution + # and then performing a one-tailed integral starting at the observed number of events in the search window signal region t0_distr_h = get_t0(counts[0], counts[1], counts[2], counts[3], counts[4], ntrials=t0_trials) pmean, plow_err, pup_err = get_pvalue(t0_distr_h, nobs) - print('pvalue: ', pmean) pvalue_uperr_mev.append(pup_err) pvalue_lowerr_mev.append(plow_err) pvalue_mev.append(pmean) + print('local pvalue: ', pmean) -#cnv results to numpy floats +# Convert the search results into numpy arrays inv_masses = np.array(inv_masses, dtype=float) exp_bkg_mev = np.array(exp_bkg_mev, dtype=float) nobs_mev = np.array(nobs_mev, dtype=float) @@ -320,38 +418,42 @@ def get_pvalue(test_stat_h, nobs): pvalue_lowerr_mev = np.array(pvalue_lowerr_mev, dtype=float) +# Save the expected background with errors in a ROOT TGraphAsymmErrors expected_bkg_g = r.TGraphAsymmErrors(len(inv_masses), inv_masses, exp_bkg_mev, np.zeros(len(inv_masses)), np.zeros(len(inv_masses)), bkg_lowerror_mev, bkg_uperror_mev) expected_bkg_g.SetName('expected_background') expected_bkg_g.SetTitle('Expected Background;Vd Invariant Mass [MeV]; Events') +# Save the number of observed events in each search window signal region as a ROOT Tgraph nobs_g = r.TGraph(len(inv_masses), inv_masses, nobs_mev) nobs_g.SetName('Nobs') nobs_g.SetTitle('Observed;Vd Invariant Mass [MeV]; Events') +# Save the local p-value with errors for each search window signal region as a ROOT TGraphAsymmErrors pvalue_g = r.TGraphAsymmErrors(len(inv_masses), inv_masses, pvalue_mev, np.zeros(len(inv_masses)), np.zeros(len(inv_masses)), pvalue_lowerr_mev, pvalue_uperr_mev) pvalue_g.SetName('local_pvalue') pvalue_g.SetTitle('Local P-Value;Vd Invariant Mass [MeV]; local p-value') -#look elsewhere effect +# Calcualte the global p-value corrected for the "Look Elsewhere Effect" (LEE) avg_resolution = np.average(np.array([signalProcessor.mass_resolution(x) for x in inv_masses])) look_elsewhere = np.array((inv_masses[-1] - inv_masses[0])/avg_resolution) print(f'Average mass resolution: {avg_resolution}') print(f'Look elsewhere effect: {look_elsewhere}') +# Global p-value with LEE correction pvalue_global_g = r.TGraphAsymmErrors(len(inv_masses), inv_masses, pvalue_mev*look_elsewhere, np.zeros(len(inv_masses)), np.zeros(len(inv_masses)), pvalue_lowerr_mev*look_elsewhere, 
pvalue_uperr_mev*look_elsewhere) pvalue_global_g.SetName('global_pvalue') pvalue_global_g.SetTitle('Global P-Value;Vd Invariant Mass [MeV]; global p-value') - +# Save the search results outfile = r.TFile(f'{outfilename}.root', "RECREATE") outfile.cd() expected_bkg_g.Write() nobs_g.Write() pvalue_g.Write() pvalue_global_g.Write() - outfile.Close() +# Print the local and global thresholds for reference/recording thresholds = [] thresholds_lew = [] from scipy.stats import norm diff --git a/plotUtils/simps/simp_signal_2016.py b/plotUtils/simps/simp_signal_2016.py index 2f6588826..99d0722e7 100644 --- a/plotUtils/simps/simp_signal_2016.py +++ b/plotUtils/simps/simp_signal_2016.py @@ -1,3 +1,23 @@ +#!/usr/bin/python3 +#======================================================================================================================================= +""" +SignalProcessor Class +--------------------- +This script defines the 'SignalProcessor' class, which handles the expected signal calculations used in the 2016 SIMP L1L1 analysis. +This script also imports the necessary simp equations to perform the calculations. +This processor operates on the output of the hpstr vertex analysis processor, which is a flat tuple of events. + +Modules: + - os + - awkward as ak + - numpy as np + - hist + - uproot + - ROOT as r + - argparse + - simp_theory_equations (imported as SimpEquations) +""" + import os import awkward as ak import numpy as np @@ -6,18 +26,46 @@ import uproot import math import ROOT as r -#import matplotlib as mpl import copy -#import mplhep -#import matplotlib.pyplot as plt -#from matplotlib.backends.backend_pdf import PdfPages -#mpl.style.use(mplhep.style.ROOT) import argparse from simp_theory_equations import SimpEquations as simpeqs +#======================================================================================================================================= class SignalProcessor: + """ + A class for handling signal data processing and physics parameter calculations from SIMPs. + + Attributes: + nsigma (float): Size of the invariant mass search window in terms of the mass resolution (default is 1.5). + alpha_dark (float): The dark sector fine structure constant. + mass_ratio_ap_to_vd (float): Ratio of dark photon mass to dark vector meson mass. + mass_ratio_ap_to_pid (float): Ratio of dark photon mass to pion mass. + mass_lepton (float): ele mass (in MeV). + target_pos (float): Target position in mm (default is -4.3 mm) + mpifpi (float): ratio of dark pion mass and dark pion decay constant, default value i 4*pi + mass_resolution (function): Polynomial function representing mass resolution. + radiative_fraction (function): Polynomial function for radiative fraction. + radiative_acceptance (function): Polynomial function for radiative acceptance. + psum_reweighting (function): Polynomial function for reweighting based on MC bkg and data Psum. + minz0_cut_poly (function): Polynomial function for minimum z0 cut. + cr_psum_low (float): Lower bound for CR psum. + cr_psum_high (float): Upper bound for CR psum. + sr_psum_low (float): Lower bound for SR psum. + sr_psum_high (float): Upper bound for SR psum. + trident_differential_production: Holds the differential trident production rate + """ + def __init__(self, mpifpi=4*np.pi, nsigma=1.5): - self.nsigma = nsigma + """ + Initializes the SignalProcessor with default SIMP model parameters and physics constants. + + Args: + mpifpi (float, optional): benchmark values are 3 and 4pi. 
+ nsigma (float, optional): Size of the invariant mass search window in terms of the mass resolution (default is 1.5). + """ + + #search window size + self.nsigma = nsigma #SIMP parameters self.alpha_dark = 0.01 @@ -26,21 +74,32 @@ def __init__(self, mpifpi=4*np.pi, nsigma=1.5): self.mass_lepton = 0.511 self.target_pos = -4.3 self.mpifpi = mpifpi - ###2016 KF MC with hit killing and momentum smearing + + #fit functions calculated externally self.mass_resolution = self.polynomial(.75739851, 0.031621002, 5.2949672e-05) self.radiative_fraction = self.polynomial(0.10541434, -0.0011737697, 7.4487930e-06, -1.6766332e-08) self.radiative_acceptance = self.polynomial(-0.48922505, 0.073733061, -0.0043873158, 0.00013455495, -2.3630535e-06, 2.5402516e-08, -1.7090900e-10, 7.0355585e-13, -1.6215982e-15, 1.6032317e-18) self.psum_reweighting = self.polynomial(0.094272950, 0.87334446, -0.19641796) #GeV argument self.minz0_cut_poly = self.polynomial(1.07620094e+00 + 0.1, -7.44533811e-03, 1.58745903e-05) + + #signal and control region boundaries self.cr_psum_low = 1.9 self.cr_psum_high = 2.4 self.sr_psum_low = 1.0 self.sr_psum_high = 1.9 - ### + + #scales the expected signal to data self.trident_differential_production = None def set_radiative_acceptance(self, *coefficients): + """ + Sets the polynomial coefficients for the radiative acceptance function. + Used for systematic studies where the radiative acceptance changes from nominal. + + Args: + *coefficients (float): Coefficients of the polynomial function. + """ self.radiative_acceptance = self.polynomial(*coefficients) @staticmethod @@ -53,6 +112,18 @@ def _implementation(x): return _implementation def load_data(self, filepath, selection, cut_expression=None, expressions=None): + """ + Loads data from hpstr vertex ana processor output file using Uproot, applying selection and cuts if provided. + + Args: + filepath (str): Path to the ROOT file. + selection (str): The dataset selection (subdir representing hpstr lvl selection) + cut_expression (str, optional): A cut expression to filter data. + expressions (list, optional): List of specific expressions (branches) to load. + + Returns: + awkward.Array: An array of selected data. + """ with uproot.open(filepath) as f: events = f[f'{selection}/{selection}_tree'].arrays( cut=cut_expression, @@ -71,10 +142,28 @@ def safe_divide(numerator, denominator, default=0.0): return result def load_pre_readout_signal_z_distribution(self, filepath): + """ + Loads MC signal vertex z distribution output from the hpstr mcana processor + + Args: + filepath (str): Path to ROOT file. + """ with uproot.open(filepath) as sig_f: return sig_f['mcAna/mcAna_mc625Z_h'].to_hist() def load_signal(self, filepath, pre_readout_filepath, mass_vd, selection, cut_expression=None, branches=None): + """ + Loads MC signal from hpstr vertex ana processor output. + + Args: + filepath (str): Path to ROOT file. + pre_readout_filepath (str): mc ana file corresponding to signal mass. + mass_vd (float): dark vector mass. + selection (str): subdir where ttree is located in ROOT file. 
+ cut_expression (str, optional): A cut expression to filter data (Ex: "unc_vtx_psum < 1.9") + branches (str, optional): List of branches to load (default ones necessary for analysis) + + """ with uproot.open(filepath) as sig_f: if branches: events = sig_f[f'{selection}/{selection}_tree'].arrays( @@ -95,10 +184,16 @@ def load_signal(self, filepath, pre_readout_filepath, mass_vd, selection, cut_ex events['psum_reweight'] = self.psum_reweighting(events.unc_vtx_psum) events['psum_reweight'] = ak.where(events['psum_reweight'] > 1., 1., events['psum_reweight']) - + #load the mc truth signal vertex distribution. Used to calculate signal acceptance*eff as F(z) not_rebinned_pre_readout_z_h = self.load_pre_readout_signal_z_distribution(pre_readout_filepath) def sample_pre_readout_probability(z): + """ + Calculates the signal acceptance*efficiency as a function of F(z) + + Args: + z (float): truth vertex z bin. + """ if z < not_rebinned_pre_readout_z_h.axes[0].edges[0]: return 0. if z > not_rebinned_pre_readout_z_h.axes[0].edges[-1]: @@ -113,6 +208,22 @@ def sample_pre_readout_probability(z): return events def _load_trident_differential_production_lut(self, background_file, selection, signal_mass_range, mass_window_width, tenpct=True, full_lumi_path=None): + """ + Loads lookup table that stores the differential radiative trident rate as a function of A' mass. + This rate scales the expected signal to the data set. + + Args: + background_file (str): hpstr vertex ana ROOT file containing reconstructed+selected background events in CR. + selection (str): subdir where ttree is located in ROOT file. + signal_mass_range (tuple): range of dark vector masses that expected signal will be calculated for. + mass_window_width (float): width of the mass window used to calculate the reconstructed bkg rate (it gets averaged) + tenpct (bool, optional): If True, use 10% data to normalize signal. If False, use 100% data (must provide full lumi path). + full_lumi_path (str, optional): If tenpct=False, provide path to 100% data. + + Returns: + The look-up table for radiative trident differential production rates as function of A' mass. + + """ dNdm_by_mass_vd = {} bkgd_CR = ak.Array([]) @@ -147,17 +258,51 @@ def _load_trident_differential_production_lut(self, background_file, selection, return dNdm_by_mass_vd def trident_differential_production(self, mass_vd): + """ + Returns the radiative trident differential production rate. + + Args: + mass_vd (float): dark vector mass + + Notes: + If the dark vector mass isn't found, could be the result of the mvd -> map conversion. + """ if int(mass_vd) in self.trident_differential_production.keys(): return self.trident_differential_production[mass_vd] raise ValueError(f'The dark vector mass {mass_vd} is not found in the trident differential production look-up table.') - #Use the reconstructed data in the high psum region to scale the differential radiative trident production rate - #This scales the A' production rate, therefore the expected signal def set_diff_prod_lut(self,infile, preselection, signal_mass_range, tenpct=True, full_lumi_path=None): + """ + Initializes the trident differential production lookup table. + + Args: + infile (str): hpstr vertex ana ROOT file containing reconstructed+selected background events in CR. + preselection (str): ROOT file subdir containing preselection ttree. + signal_mass_range (tuple): dark vector meson mass range + tenpct (bool, optional): If True use 10% data sample. If False, use 100% data (requires full_lumi_path). 
+ full_lumi_path (str, optional): If tenpct == False, provide path to 100% data. + + Notes: + This was all developed using 10% data stored in a single ROOT file, hence the tenpct option. But you can pass any + single ROOT file with tenpct set to True. + The final analysis uses 100% data, which consists of multiple separate root files located at full_lumi_path. + """ + #Initialize the lookup table to calculate the expected signal scale factor self.trident_differential_production = self._load_trident_differential_production_lut(infile, preselection, signal_mass_range, 2.0*self.nsigma, tenpct=tenpct, full_lumi_path=full_lumi_path) def total_signal_production_per_epsilon2(self, signal_mass): + """ + Calculates the total dark vector meson production rate as a function of epsilon^2 + + Args: + signal_mass (float): dark vector meson mass. + + Notes: + Notice that you pass the dark vector meson mass, and it gets converted to an A' mass. + The radiative fraction, acceptance, and trident prod are all calculated using the A' mass, NOT the vector mass! + The dark vector to A' mass ratio is set upon initializing the instance of this class. + """ mass_ap = self.mass_ratio_ap_to_vd*signal_mass return ( (3. * (137. / 2.) * np.pi) @@ -167,49 +312,107 @@ def total_signal_production_per_epsilon2(self, signal_mass): ) def get_exp_sig_eps2(self, signal_mass, signal_array, eps2): + """ + Calculates the neutral dark vector meson signal acceptance*efficiency*probability for each MC signal event for value eps^2. + This function reweights the MC signal events by considering the dark vector (rho and phi) decay probabilities. - #Define simp masses - mass_ap = self.mass_ratio_ap_to_vd*signal_mass - mass_pid = mass_ap / self.mass_ratio_ap_to_pid - fpid = mass_pid / self.mpifpi + Args: + signal_mass (float): The generated dark vector meson mass. + signal_array (awkward array): MC signal array loaded by load_signal. + eps2 (float): The square of the kineitc mixing strength parameter (affects decay probabilities). + Returns: + signal_array (awkward array): The input signal array with added columns for the reweighted signal + acceptance*efficiency*probability ('reweighted_accxEff'). + + Notes: + The vector decay probabilities depend on the A' mass, dark pion mass, and mpi/fpi, all of which are initialized + with the instance of this class. 
+
+        """
+
+        #Define SIMP masses based on mass ratios and constants
+        mass_ap = self.mass_ratio_ap_to_vd*signal_mass # A' mass from Vd mass
+        mass_pid = mass_ap / self.mass_ratio_ap_to_pid # Dark pion mass from A' mass
+        fpid = mass_pid / self.mpifpi # Dark pion decay constant from ratio of dark pion mass to dark pion decay constant
+
+        #Calculate the decay length in the lab frame for rho and phi
         rho_gctau = signal_array.vd_true_gamma * simpeqs.getCtau(mass_ap, mass_pid, signal_mass, np.sqrt(eps2), self.alpha_dark, fpid, self.mass_lepton, True)
         phi_gctau = signal_array.vd_true_gamma * simpeqs.getCtau(mass_ap, mass_pid, signal_mass, np.sqrt(eps2), self.alpha_dark, fpid, self.mass_lepton, False)

+        #Calculate the decay weights for rho and phi based on the vertex z position and gamma*ctau
         rho_decay_weight = (np.exp((self.target_pos - signal_array.vd_true_vtx_z) / rho_gctau) / rho_gctau)
         phi_decay_weight = (np.exp((self.target_pos - signal_array.vd_true_vtx_z) / phi_gctau) / phi_gctau)

-        signal_array['reweighted_accxEff_rho'] = simpeqs.br_Vrho_pi(mass_ap, mass_pid, signal_mass, self.alpha_dark, fpid)*rho_decay_weight*signal_array.event_weight_by_uniform_z*signal_array.psum_reweight
-        print(simpeqs.br_Vrho_pi(mass_ap, mass_pid, signal_mass, self.alpha_dark, fpid))
-        signal_array['reweighted_accxEff_phi'] = simpeqs.br_Vphi_pi(mass_ap, mass_pid, signal_mass, self.alpha_dark, fpid)*phi_decay_weight*signal_array.event_weight_by_uniform_z*signal_array.psum_reweight
+        #signal_array['reweighted_accxEff_rho'] = simpeqs.br_Vrho_pi(mass_ap, mass_pid, signal_mass, self.alpha_dark, fpid)*rho_decay_weight*signal_array.event_weight_by_uniform_z*signal_array.psum_reweight
+        #signal_array['reweighted_accxEff_phi'] = simpeqs.br_Vphi_pi(mass_ap, mass_pid, signal_mass, self.alpha_dark, fpid)*phi_decay_weight*signal_array.event_weight_by_uniform_z*signal_array.psum_reweight

+        #Calculate the combined decay weight for both rho and phi mesons.
+        #This result represents the overall expected signal decay weight.
         combined_decay_weight = (
             (rho_decay_weight * simpeqs.br_Vrho_pi(mass_ap, mass_pid, signal_mass, self.alpha_dark, fpid))
             + (phi_decay_weight * simpeqs.br_Vphi_pi(mass_ap, mass_pid, signal_mass, self.alpha_dark, fpid))
         )

-        # the weight for a single event is the chance of that decay (z and gamma from either Vd)
-        # multiplied by probability the event was from that z-bin in the original sample
+        #The final reweighting includes both rho and phi, and includes the signal acceptance*efficiency as a function of z via
+        #signal_array.event_weight_by_uniform_z. Psum re-weighting (calculated externally, saved in this class) is also included.
         signal_array['reweighted_accxEff'] = combined_decay_weight*signal_array.event_weight_by_uniform_z*signal_array.psum_reweight

         return signal_array

     @staticmethod
     def get_minz0_cut():
+        """
+        Defines the minimum z0 cut polynomial coefficients.
+        These coefficients were determined by optimizing this cut using ZBi as a function of MC signal mass.
+        The optimized cut values as a function of mass were then fit with a polynomial.
+        The cut shape was then tightened by 0.1 mm.
+
+        Returns:
+            coeffs (list): minimum z0 cut function coefficients.
+        """
         coeffs = [1.07620094e+00 + 0.1, -7.44533811e-03, 1.58745903e-05]
         return coeffs

     def minz0_sel(self,array):
+        """
+        Applies the minimum z0 cut.
+
+        Args:
+            array (awkward array): Data array.
+
+        Returns:
+            sel (awkward array): Boolean array representing events passing the minimum z0 cut.
+ """ + # Retrieve coefficients for the minimum z0 cut coeffs = self.get_minz0_cut() p0 = coeffs[0] p1 = coeffs[1] p2 = coeffs[2] + + # Get boolean mask of events that pass sel = ( ( array.unc_vtx_min_z0 > (p0 + p1*array.unc_vtx_mass*1000 + (p2*np.square(array.unc_vtx_mass*1000.))) ) ) return sel def mass_sel(self,array, signal_mass): + """ + Applies a mass window cut around the signal mass. + The width of the window is determined by the mass resolution of the number of sigma. + + Args: + array (awkward array): Data array. + signal_mass (float): The dark vector meson search window mass center. + + Returns: + sel (awkward array): Boolean array representing events inside the mass window. + """ + # Calcualte the lower and upper bounds of the mass search window based on the mass resolution mass_low = signal_mass - self.nsigma*self.mass_resolution(signal_mass) mass_high = signal_mass + self.nsigma*self.mass_resolution(signal_mass) + + # Get the boolean mask of events that pass sel = ( ( array.unc_vtx_mass*1000. >= {mass_low}) & (array.unc_vtx_mass*1000. <= {mass_high}) ) @@ -217,20 +420,44 @@ def mass_sel(self,array, signal_mass): @staticmethod def psum_sel(array, case='sr'): + """ + Applies the Psum selection (signal region or control region). + + Args: + array (awkward array): Data array. + case (str, optional): Specify signal region ('sr') or control region ('cr'). + + Returns: + sel (awkward array): Boolean array representing events in Psum region. + """ + if case == 'sr': + # Select momentum sum for signal region (between 1.0 and 1.9 GeV) sel = ( (array.unc_vtx_psum > 1.0) & (array.unc_vtx_psum < 1.9) ) elif case == 'cr': + # Select momentum sum for control region (between 1.9 and 2.4 GeV) sel = ( (array.unc_vtx_psum > 1.9) & (array.unc_vtx_psum < 2.4) ) else: + # No selection if invalid case sel = () return sel @staticmethod def vprojsig_sel(array): + """ + Applies cut on the target projected vertex significance. + + Args: + array (awkward array): Data array. + + Returns: + sel (awkward array): Boolean array representing events that pass this cut. + """ + # Select events with v0projsig < 2.0 sel = ( (array.unc_vtx_proj_sig < 2) ) @@ -238,6 +465,16 @@ def vprojsig_sel(array): @staticmethod def sameside_z0_cut(array): + """ + Applies a cut to remove events where both tracks have z0 (aka y0) with the same sign. + This cut doesn't seem to do much after the final cut, but could be useful earlier on in the analysis. + + Args: + array (awkward array): Data array. + + Returns: + sel (awkward array): Boolean array representing events that pass this cut. + """ sel = ( (-1.*(array.unc_vtx_ele_track_z0*array.unc_vtx_pos_track_z0) > 0) ) @@ -245,12 +482,24 @@ def sameside_z0_cut(array): @staticmethod def zcut_sel(array): + """ + Applies a cut on the reconstructed z vertex position. + + Args: + array (awkward array): Data array. + + Returns: + sel (awkward array): Boolean array representing events passing this cut. + """ + + # Select events where the reconstructed vertex z position is greater than -4.3 mm (target location) sel = ( (array.unc_vtx_z > -4.8) ) return sel + #Combine all of the selections into one function if you like. def tight_selection(self, array, signal_mass, case=1): coeffs = self.get_minz0_cut() p0 = coeffs[0] @@ -280,7 +529,19 @@ def tight_selection(self, array, signal_mass, case=1): @staticmethod def readROOTHisto(infilename, histoname): + """ + Quickly read a ROOT histogram from a root file, and make a deep copy. + + Args: + infilename (str): Input ROOT file. 
+ histoname (str): Name of histogram being loaded. + + Returns: + histo (ROOT obj): Graph or histogram. + """ + #Open ROOT file infile = r.TFile(f'{infilename}',"READ") + #Make copy of histogram histo = copy.deepcopy(infile.Get(f'{histoname}')) infile.Close() return histo @@ -288,6 +549,17 @@ def readROOTHisto(infilename, histoname): @staticmethod def cnvHistosToROOT(histos=[], tempname='temporary_uproot'): + """ + Converts hist histograms to ROOT histograms. Convenient because ROOT automatically calculates errors. + Also nice if you want to present result using ROOT histogram formatting. + + Args: + histos (list[Hist]): List of hist histograms. + tempname (str): Name of temporary ROOT file. Necessary to save Hist using uproot, and then read ROOT out. + + Returns: + return_histos (list[ROOT histograms]): ROOT histograms. + """ return_histos = [] uproot_file = uproot.recreate(f'trash_{tempname}.root') for i, histo in enumerate(histos): @@ -309,8 +581,152 @@ def cnvHistoToROOT(histo, tempname='temporary_uproot'): infile.Close() return root_hist + @staticmethod + def cnv_root_to_np(histo): + """ + Extract values from ROOT histogram to enable analysis or plotting with other tools. + + Args: + hist (TH1): Input ROOT histogram. + + """ + nbins = histo.GetNbinsX() + xvals = np.array([histo.GetBinCenter(x+1) for x in range(nbins+1)]) + yvals = np.array([histo.GetBinContent(x+1) for x in range(nbins+1)]) + errors = np.array([histo.GetBinError(x+1) for x in range(nbins+1)]) + underflow = histo.GetBinContent(0) + overflow = histo.GetBinContent(nbins+1) + + #add over/underflow + xvals = np.insert(xvals, 0, xvals[0]-histo.GetBinWidth(1)) + yvals = np.insert(yvals, 0, underflow) + xvals = np.append(xvals, xvals[-1]+histo.GetBinWidth(1)) + yvals = np.append(yvals, overflow) + errors = np.insert(errors, 0, 0.0) + errors = np.append(errors, 0.0) + + #get fit function if it exist + x_fit = None + y_fit = None + if len(histo.GetListOfFunctions()) > 0: + fitfunc = histo.GetListOfFunctions()[0] + x_fit = np.linspace(fitfunc.GetXmin(), fitfunc.GetXmax(), int((fitfunc.GetXmax()-fitfunc.GetXmin())/histo.GetBinWidth(1))) + y_fit = np.array([fitfunc.Eval(x) for x in x_fit]) + + return (xvals, yvals, errors), (x_fit, y_fit) + + @staticmethod + def cnv_tgraph_to_np(tgraph): + """ + Extract values from ROOT Tgraph. + + Args: + tgraph (TGraph): Input ROOT Tgraph + """ + # Number of points in the TGraph + npoints = tgraph.GetN() + + # Retrieve X and Y values + xvals = np.array([tgraph.GetX()[i] for i in range(npoints)]) + yvals = np.array([tgraph.GetY()[i] for i in range(npoints)]) + + #Errors not available in standard TGraph, set to zero. + errors = np.zeros(npoints) + + # Handle fit function if it exists + x_fit = None + y_fit = None + if len(tgraph.GetListOfFunctions()) > 0: + fitfunc = tgraph.GetListOfFunctions()[0] + x_fit = np.linspace(fitfunc.GetXmin(), fitfunc.GetXmax(), 100) # 100 points for the fit + y_fit = np.array([fitfunc.Eval(x) for x in x_fit]) + + return (xvals, yvals, errors), (x_fit, y_fit) + + + @staticmethod + def fit_plot_with_poly(plot, tgraph=False, specify_n=None, set_xrange=False, xrange=(0.0, 1.0)): + """ + Fit TH1 or TGraph with polynomial fit function. Uses fstat test to guide choice of polynomial degree. + + Args: + plot (TH1 or TGraph): Input plot to be fitted. + tgraph (boolean, optional): If True, access npoints through TGraph method. + specify_n (int, optional): If None, fit plot using all n degrees, calculate and print fstat for each n. + If int, fit plot using n degree polynomial. 
+ set_xrange (boolean, optional): If True, set fit range according to xrange. + xrange (float, float, optional): Define fit range. + + Returns: + params (list): List of fit function parameters. + errors (list): List of fit parameter errors. + + Notes: + If trying to determine what order polynomial to fit plot, specify_n=None. Function will print out fstat results. + Once best order determined, set specifcy_n= to get fit resulst. + """ + polys = [] + chi2s = [] + fstats = [] + fit_resultults = [] + + if tgraph: + npoints = plot.GetN() + else: + npoints = 0 + nBins = plot.GetNbinsX() + for ibin in range(nBins): + if plot.GetBinContent(ibin) > 0: + npoints += 1 + pass + + + if not specify_n: + for n in range(11): + fitfunc = r.TF1(f'pol{n}',f'pol{n}') + fitfunc.SetLineColor(r.kRed) + if set_xrange: + fitfunc.SetRange(xrange[0], xrange[1]) + fit_result = plot.Fit(fitfunc,"RSQ") + else: + fit_result = plot.Fit(fitfunc,"SQ") + fitfunc.SetLineColor(r.kRed) + fitfunc.SetMarkerSize(0.0) + chi2s.append(fit_result.Chi2()) + polys.append(n) + fit_resultults.append(fit_result) + + #Perform fstat test to see how much fit improves with additional order + if n > 0: + fstats.append( (chi2s[n-1]-chi2s[n])*(npoints-n-1)/(chi2s[n])) + else: + fstats.append(0.0) + + print(fstats) + return None, None + else: + fitfunc = r.TF1(f'pol{specify_n}',f'pol{specify_n}') + fitfunc.SetLineColor(r.kRed) + fitfunc.SetLineWidth(5) + if set_xrange: + fitfunc.SetRange(xrange[0], xrange[1]) + fit_result = plot.Fit(fitfunc,"RSQ") + else: + fit_result = plot.Fit(fitfunc,"SQ") + params = fit_result.Parameters() + errors = fit_result.Errors() + #return fit_result + return params, errors + + + def systematic_uncertainties(self): - #using a +- 1.5 sigma search window with all cuts frozen to 2016 displaced simp dissertation (Alic) + """ + This method contains all of the systematic uncertainties applied in Alic's 2016 SIMP L1L1 dissertation. + The detector misalignment systematic is not included as of 09/06/2024. + These functions and values were calculated externally using a +- 1.5 sigma search window, with cuts frozen per dissertation. + All polynomials are a function of A' mass. + """ self.radacc_targetz_nominal = self.polynomial(0.24083419, -0.017612076, 0.00037553660, -1.0223921e-06, -3.8793240e-08, 4.2199609e-10, -1.6641414e-12, 2.3433278e-15) self.radacc_targetz_Mpt5 = self.polynomial(0.22477846, -0.015984559, 0.00030943435, 3.6182165e-07, -5.4820194e-08, @@ -322,6 +738,15 @@ def systematic_uncertainties(self): self.radfrac = 0.07 def evaluate_polynomials(self, mass): + """ + Returns the systematic uncertainties defined in systematic_uncertainties() as a function of mass. + + Args: + mass (float): A' mass. + + Note: + You need to understand each systematic in order to correctly combine these numbers. + """ nominal_values = self.radacc_targetz_nominal(mass) Mpt5_values = self.radacc_targetz_Mpt5(mass) Ppt5_values = self.radacc_targetz_Ppt5(mass) @@ -331,30 +756,50 @@ def evaluate_polynomials(self, mass): @staticmethod def inject_signal_mc(signal, data, nevents=100): - #Find the maximum signal weight + """ + Randomly selects MC signal events and injects them into data array. + + Args: + signal (awkward array): MC signal array loaded using load_signal(). + data (awwkard array): Data array loaded using load_data(). + nevents (int): Specify number of MC signal events to inject into data. + + Returns: + injected_data (awkward array): Copy of data array with injected signal. 
+ thrown_events (int): Number of MC signal events that were thrown. Useful sanity check. + """ + # Identify the maximum signal event weight in the MC signal array max_weight = np.max(signal.expected_signal_weight) + + # Randomly sample the MC signal array until the specified number of events is thrown events_thrown = 0 thrown_mask = [] - #sample signal until requested nevents thrown while events_thrown < nevents: - #Randomly select a signal event + + # Randomly select a signal event from the array rint = np.random.randint(0,len(signal.expected_signal_weight)-1) random_event = signal[rint] - #Randomly sample the weight distribution. If the sampled weight < event weight, throw the event + + # Randomly sample the uniform distribution between 0-maximum signal event weight + # If the uniform sample weight is less than the randomly selected event weight, thrown the event rweight = np.random.uniform(0, max_weight) if rweight < random_event.expected_signal_weight: events_thrown += 1 thrown_mask.append(rint) - thrown_events = signal[thrown_mask] thrown_events['weight'] = 1.0 - #combine mass and min z0 into array and inject into data + # Inject the randomly selected signal events into the data by combining the awkward arrays injected_data = ak.concatenate([data, thrown_events]) return injected_data, thrown_events +#======================================================================================================================================= +# MAIN: Calculate the expected signal +#======================================================================================================================================= if __name__ == '__main__': + + #parse input arguments parser = argparse.ArgumentParser(description='Process some inputs.') parser.add_argument('--outfilename', type=str, default='expected_signal_output.root') parser.add_argument('--mpifpi', type=float, default=4*np.pi) @@ -370,7 +815,6 @@ def inject_signal_mc(signal, data, nevents=100): tenpct = args.tenpct - #Create MC signal analysis tuple processor print('Initialize signal processor') processor = SignalProcessor(mpifpi=mpifpi, nsigma=nsigma) @@ -415,7 +859,6 @@ def inject_signal_mc(signal, data, nevents=100): for signal_mass in masses: #Load MC Signal - #indir = '/sdf/group/hps/user-data/alspellm/2016/simp_mc/pass4b/beam/smeared' indir = '/sdf/group/hps/user-data/alspellm/2016/simp_mc/pass4b/beam/smeared_fixbeamspot' signal_pre_readout_path = lambda mass: f'/sdf/group/hps/user-data/alspellm/2016/simp_mc/pass4b/nobeam/mass_{mass}_simp_2pt3_slic_hadd_ana.root' signal_path = lambda mass: f'{indir}/mass_{mass}_hadd-simp-beam_ana_smeared_corr.root' @@ -435,8 +878,8 @@ def inject_signal_mc(signal, data, nevents=100): minz0_sel = processor.minz0_sel(signal) masswindow_sel = processor.mass_sel(signal, signal_mass) sameside_sel = processor.sameside_z0_cut(signal) + # Combine selections tight_sel = np.logical_and.reduce([psum_sel,zcut_sel, vprojsig_sel, psum_sel, minz0_sel, masswindow_sel, sameside_sel]) - #tight_sel = processor.tight_selection(signal, signal_mass) for l, eps2 in enumerate(eps2_range): signal = processor.get_exp_sig_eps2(signal_mass, signal, eps2) diff --git a/plotUtils/simps/systematics/radiative_acceptance_systematic.py b/plotUtils/simps/systematics/radiative_acceptance_target_position_systematic.py similarity index 53% rename from plotUtils/simps/systematics/radiative_acceptance_systematic.py rename to plotUtils/simps/systematics/radiative_acceptance_target_position_systematic.py index 14adcb640..854e069ba 100644 --- 
a/plotUtils/simps/systematics/radiative_acceptance_systematic.py +++ b/plotUtils/simps/systematics/radiative_acceptance_target_position_systematic.py @@ -1,9 +1,4 @@ -#!/usr/bin/env python -# coding: utf-8 - -# In[1]: - - +#!/usr/bin/python3 import os import awkward as ak import numpy as np @@ -12,121 +7,37 @@ import uproot import ROOT as r import copy - import matplotlib.pyplot as plt import matplotlib as mpl import mplhep import matplotlib.gridspec as gridspec - import sys -sys.path.append('/sdf/group/hps/user-data/alspellm/2016/plotting') -import hps_plot_utils as utils - -get_ipython().run_line_magic('matplotlib', 'inline') -mpl.style.use(mplhep.style.ROOT) import math -import pickle -sys.path.append('/sdf/home/a/alspellm/src/hpstr_v62208/plotUtils/simps') +hpstr_base = os.getenv('HPSTR_BASE') +sys.path.append(f'{hpstr_base}/plotUtils/simps') import simp_signal_2016 -from simp_theory_equations import SimpEquations as simpeqs -import copy -# Set global font sizes -plt.rcParams.update({'font.size': 50, # Font size for text - 'axes.titlesize': 50, # Font size for titles - 'axes.labelsize': 50, # Font size for axis labels - 'xtick.labelsize': 50, # Font size for x-axis tick labels - 'ytick.labelsize': 50, # Font size for y-axis tick labels - 'lines.linewidth':4.0, - 'legend.fontsize': 50}) # Font size for legend -plt.rcParams['font.family'] = 'DejaVu Sans' - -def cnv_root_to_np(histo): - nbins = histo.GetNbinsX() - xvals = np.array([histo.GetBinCenter(x+1) for x in range(nbins+1)]) - yvals = np.array([histo.GetBinContent(x+1) for x in range(nbins+1)]) - errors = np.array([histo.GetBinError(x+1) for x in range(nbins+1)]) - underflow = histo.GetBinContent(0) - overflow = histo.GetBinContent(nbins+1) - - #add over/underflow - xvals = np.insert(xvals, 0, xvals[0]-histo.GetBinWidth(1)) - yvals = np.insert(yvals, 0, underflow) - xvals = np.append(xvals, xvals[-1]+histo.GetBinWidth(1)) - yvals = np.append(yvals, overflow) - errors = np.insert(errors, 0, 0.0) - errors = np.append(errors, 0.0) - - #get fit function if it exist - x_fit = None - y_fit = None - if len(histo.GetListOfFunctions()) > 0: - fitfunc = histo.GetListOfFunctions()[0] - x_fit = np.linspace(fitfunc.GetXmin(), fitfunc.GetXmax(), int((fitfunc.GetXmax()-fitfunc.GetXmin())/histo.GetBinWidth(1))) - y_fit = np.array([fitfunc.Eval(x) for x in x_fit]) - - return (xvals, yvals, errors), (x_fit, y_fit) - -def fit_plot_with_poly(plot, tgraph=False, specify_n=None, set_xrange=False, xrange=(0.0, 1.0)): - polys = [] - chi2s = [] - fstats = [] - fit_resultults = [] - - if tgraph: - npoints = plot.GetN() - else: - npoints = 0 - nBins = plot.GetNbinsX() - for ibin in range(nBins): - if plot.GetBinContent(ibin) > 0: - npoints += 1 - pass - - - if not specify_n: - for n in range(11): - fitfunc = r.TF1(f'pol{n}',f'pol{n}') - fitfunc.SetLineColor(r.kRed) - if set_xrange: - fitfunc.SetRange(xrange[0], xrange[1]) - fit_result = plot.Fit(fitfunc,"RSQ") - else: - fit_result = plot.Fit(fitfunc,"SQ") - fitfunc.SetLineColor(r.kRed) - fitfunc.SetMarkerSize(0.0) - chi2s.append(fit_result.Chi2()) - polys.append(n) - fit_resultults.append(fit_result) - #Perform fstat test to see how much fit improves with additional order (why does this work?) 
- if n > 0: - fstats.append( (chi2s[n-1]-chi2s[n])*(npoints-n-1)/(chi2s[n])) - else: - fstats.append(0.0) - - print(fstats) - return None, None - else: - fitfunc = r.TF1(f'pol{specify_n}',f'pol{specify_n}') - fitfunc.SetLineColor(r.kRed) - fitfunc.SetLineWidth(5) - if set_xrange: - fitfunc.SetRange(xrange[0], xrange[1]) - fit_result = plot.Fit(fitfunc,"RSQ") - else: - fit_result = plot.Fit(fitfunc,"SQ") - params = fit_result.Parameters() - errors = fit_result.Errors() - #return fit_result - return params, errors - - -# In[2]: - - -signalProcessor = simp_signal_2016.SignalProcessor(np.pi*4., 1.5) -#V0 Projection Significance Data vs MC efficiency +#format mpl plots +plt.rcParams.update({'font.size': 40, # Font size for text + 'axes.titlesize': 40, # Font size for titles + 'axes.labelsize': 40, # Font size for axis labels + 'xtick.labelsize': 40, # Font size for x-axis tick labels + 'ytick.labelsize': 40, # Font size for y-axis tick labels + 'lines.linewidth':3.0, + 'legend.fontsize': 40}) # Font size for legend +plt.rcParams['font.family'] = 'DejaVu Sans' + +import argparse +parser = argparse.ArgumentParser(description='') +parser.add_argument('--outdir', type=str, default='./search_results') +parser.add_argument('--mpifpi', type=float, default=4.*np.pi) + +args = parser.parse_args() +outdir = args.outdir +################################################################################################################################ +search_window = 1.5 +signalProcessor = simp_signal_2016.SignalProcessor(args.mpifpi, search_window) samples = {} mcsamples = {} @@ -135,7 +46,7 @@ def fit_plot_with_poly(plot, tgraph=False, specify_n=None, set_xrange=False, xra #LOAD NOMINAL RAD + BEAM #rad+beam infile = '/sdf/group/hps/user-data/alspellm/2016/rad_mc/pass4b/rad_beam/rad-beam-hadd-10kfiles-ana-smeared-corr.root' -selection = 'vtxana_radMatchTight_nocuts' #USE RADMATCHTIGHT! +selection = 'vtxana_radMatchTight_nocuts' samples['nominal_beam'] = signalProcessor.load_data(infile, selection, cut_expression='((unc_vtx_psum > 1.9) & (unc_vtx_psum < 2.4) )', expressions=branches) #mc ana infile = '/sdf/group/hps/user-data/alspellm/2016/rad_mc/pass4b/rad_nobeam/rad_nobeam_slic_hadd10ktuples_ana.root' @@ -143,7 +54,6 @@ def fit_plot_with_poly(plot, tgraph=False, specify_n=None, set_xrange=False, xra mcsamples['nominal_beam'] = copy.deepcopy(slicfile.Get('mcAna/mcAna_mc622Mass_h')) slicfile.Close() - #LOAD NOMINAL RAD + BEAM Mpt5 #rad+beam infile = '/sdf/group/hps/user-data/alspellm/2016/systematics/radacc/hadd_1937files_rad_beam_targetz_Mpt5_recon_ana.root' @@ -166,28 +76,8 @@ def fit_plot_with_poly(plot, tgraph=False, specify_n=None, set_xrange=False, xra mcsamples['targetz_Ppt5_beam'] = copy.deepcopy(slicfile.Get('mcAna/mcAna_mc622Mass_h')) slicfile.Close() -''' -#LOAD NOMINAL RAD NO BEAM -infile = '/sdf/group/hps/user-data/alspellm/2016/systematics/radacc/hadd_1999files_rad_nobeam_nominal_recon_ana.root' -selection = 'vtxana_radMatchTight_nocuts' #USE RADMATCHTIGHT! 
-samples['nominal_nobeam'] = signalProcessor.load_data(infile, selection, cut_expression='((unc_vtx_psum > 1.9) & (unc_vtx_psum < 2.4) )', expressions=branches) -#mc ana -infile = '/sdf/group/hps/user-data/alspellm/2016/systematics/radacc/hadd_2kfiles_rad_nobeam_nominal_mc_ana.root' -slicfile = r.TFile(infile, "READ") -mcsamples['nominal_nobeam'] = copy.deepcopy(slicfile.Get('mcAna/mcAna_mc622Mass_h')) -slicfile.Close() -''' - - -# In[ ]: - - - - - -# In[42]: - +#init invariant mass plot nbinsx = mcsamples['nominal_beam'].GetNbinsX() first_bin = mcsamples['nominal_beam'].GetBinLowEdge(1) last_bin = nbinsx*mcsamples['nominal_beam'].GetBinWidth(1) @@ -206,10 +96,6 @@ def fit_plot_with_poly(plot, tgraph=False, specify_n=None, set_xrange=False, xra invmass_histos[sname].Rebin(2) mcsamples[sname].Rebin(2) - -# In[43]: - - def nonUniBinning(histo, start, size): edges_a = np.arange(histo.GetBinLowEdge(1),start+histo.GetBinWidth(1),histo.GetBinWidth(1)) edges_b = np.arange(start,histo.GetBinLowEdge(histo.GetNbinsX()), size) @@ -229,9 +115,6 @@ def nonUniBinning(histo, start, size): # mcsamples[sname] = nonUniBinning(mcsamples[sname], 150, 4) -# In[45]: - - #calculate radiative acceptance fits = {} colors = ['#d62728', '#bcbd22', '#2ca02c', '#17becf', '#1f77b4', '#9467bd', '#7f7f7f'] @@ -244,9 +127,9 @@ def nonUniBinning(histo, start, size): for i,(sname, histo) in enumerate(invmass_histos.items()): ratio = invmass_histos[sname].Clone() ratio.Divide(mcsamples[sname]) - fit_params,_ = fit_plot_with_poly(ratio, specify_n=7, set_xrange=True, xrange=(30.0, 220.0)) + fit_params,_ = signalProcessor.fit_plot_with_poly(ratio, specify_n=7, set_xrange=True, xrange=(30.0, 220.0)) print(sname, fit_params) - (xvals, yvals, errors), (x_fit, y_fit) = cnv_root_to_np(ratio) + (xvals, yvals, errors), (x_fit, y_fit) = signalProcessor.cnv_root_to_np(ratio) plt.errorbar(xvals, yvals, yerr=errors, linestyle='', marker='o', color=colors[i], label=labels[i]) plt.plot(x_fit, y_fit, linewidth=3.0, color=colors[i]) fits[sname] = (x_fit, y_fit) @@ -269,16 +152,4 @@ def nonUniBinning(histo, start, size): plt.ylabel('Ratio') plt.legend() -plt.savefig('radiative_acceptance_target_deltaz.png') - - #c = r.TCanvas('f{sname}', 'f{sname}', 2000, 1000) - #c.cd() - #ratio.Draw() - #c.Draw() - - -# In[ ]: - - - - +plt.savefig(f'{outdir}/radiative_acceptance_target_deltaz.png') diff --git a/plotUtils/simps/systematics/v0projsig_systematic.py b/plotUtils/simps/systematics/v0projsig_systematic.py index d38b19439..1f6b1ed0b 100644 --- a/plotUtils/simps/systematics/v0projsig_systematic.py +++ b/plotUtils/simps/systematics/v0projsig_systematic.py @@ -1,9 +1,4 @@ -#!/usr/bin/env python -# coding: utf-8 - -# In[1]: - - +#!/usr/bin/python3 import os import awkward as ak import numpy as np @@ -12,42 +7,41 @@ import uproot import ROOT as r import copy - import matplotlib.pyplot as plt import matplotlib as mpl import mplhep import matplotlib.gridspec as gridspec - import sys -sys.path.append('/sdf/group/hps/user-data/alspellm/2016/plotting') -import hps_plot_utils as utils - -get_ipython().run_line_magic('matplotlib', 'inline') -mpl.style.use(mplhep.style.ROOT) import math -import pickle -sys.path.append('/sdf/home/a/alspellm/src/hpstr/plotUtils/simps') +hpstr_base = os.getenv('HPSTR_BASE') +sys.path.append(f'{hpstr_base}/plotUtils/simps') import simp_signal_2016 -from simp_theory_equations import SimpEquations as simpeqs -import copy -# Set global font sizes -plt.rcParams.update({'font.size': 60, # Font size for text - 'axes.titlesize': 60, # Font 
size for titles - 'axes.labelsize': 60, # Font size for axis labels - 'xtick.labelsize': 60, # Font size for x-axis tick labels - 'ytick.labelsize': 60, # Font size for y-axis tick labels - 'lines.linewidth':5.0, - 'legend.fontsize': 60}) # Font size for legend -plt.rcParams['font.family'] = 'DejaVu Sans' - - -# In[3]: - - -signalProcessor = simp_signal_2016.SignalProcessor(np.pi*4., 1.5) -#V0 Projection Significance Data vs MC efficiency +#format mpl plots +plt.rcParams.update({'font.size': 40, # Font size for text + 'axes.titlesize': 40, # Font size for titles + 'axes.labelsize': 40, # Font size for axis labels + 'xtick.labelsize': 40, # Font size for x-axis tick labels + 'ytick.labelsize': 40, # Font size for y-axis tick labels + 'lines.linewidth':3.0, + 'legend.fontsize': 40}) # Font size for legend +plt.rcParams['font.family'] = 'DejaVu Sans' + +import argparse +parser = argparse.ArgumentParser(description='') +parser.add_argument('--outdir', type=str, default='./search_results') +parser.add_argument('--mpifpi', type=float, default=4.*np.pi) + +args = parser.parse_args() +outdir = args.outdir +####################################################################################################################################### + +#Load signal processor +search_window = 1.5 #used in final search +signalProcessor = simp_signal_2016.SignalProcessor(args.mpifpi, search_window) + +#Read in data and MC bkg samples = {} branches = ["unc_vtx_proj_sig","unc_vtx_ele_track_z0","unc_vtx_pos_track_z0"] @@ -57,18 +51,16 @@ samples['data'] = signalProcessor.load_data(infile,selection, expressions=branches, cut_expression='((unc_vtx_psum > 1.0) & (unc_vtx_psum < 1.9) )') samples['data']['weight'] = 1.0 #Assign weight of 10 to scale up to full lumi +#Load MC background lumi = 10.7*.1 #pb-1 mc_scale = {'tritrig' : 1.416e9*lumi/(50000*10000), 'wab' : 0.1985e12*lumi/(100000*10000)} - #Load tritrig -infile = '/sdf/group/hps/user-data/alspellm/2016/tritrig_mc/pass4b/tritrig-beam-hadd-10kfiles-ana-smeared-corr.root' infile = '/sdf/group/hps/user-data/alspellm/2016/tritrig_mc/pass4b/hadded_tritrig-beam-10kfiles-ana-smeared-corr_beamspotfix.root' samples['tritrig'] = signalProcessor.load_data(infile, selection, cut_expression='((unc_vtx_psum > 1.2) & (unc_vtx_psum < 1.9) )', expressions=branches) samples['tritrig']['weight'] = mc_scale['tritrig'] #Load wab -infile = '/sdf/group/hps/user-data/alspellm/2016/wab_mc/pass4b/wab-beam-hadd-10kfiles-ana-smeared-corr.root' infile = '/sdf/group/hps/user-data/alspellm/2016/wab_mc/pass4b/hadded_wab-beam-10kfiles-ana-smeared-corr_beamspotfix.root' samples['wab'] = signalProcessor.load_data(infile, selection, cut_expression='((unc_vtx_psum > 1.2) & (unc_vtx_psum < 1.9) )', expressions=branches) samples['wab']['weight'] = mc_scale['wab'] @@ -76,10 +68,7 @@ #Combine tritrig and wab samples['tritrig+wab+beam'] = ak.concatenate([samples['tritrig'], samples['wab']]) - -# In[4]: - - +#init histogram of v0 projection significance values to compare data and MC background v0projsig_h = ( hist.Hist.new .StrCategory(list(samples.keys()), name='samples') @@ -103,11 +92,4 @@ plt.legend() plt.ylabel('Normalized Events') plt.yscale('log') -plt.savefig('v0projsig_systematic_lowpsum.png') - - -# In[ ]: - - - - +plt.savefig(f'{outdir}/v0projsig_systematic_lowpsum.png') From fd2ebc6d482285d6e2317f09a4156803034dfad3 Mon Sep 17 00:00:00 2001 From: Alic Shen Spellman Date: Mon, 9 Sep 2024 10:48:53 -0700 Subject: [PATCH 24/27] documenting --- plotUtils/simps/run_opt_interval.py | 147 
++++++++++++++++----------- plotUtils/simps/run_signal_search.py | 5 - plotUtils/simps/simp_signal_2016.py | 25 ++--- 3 files changed, 101 insertions(+), 76 deletions(-) diff --git a/plotUtils/simps/run_opt_interval.py b/plotUtils/simps/run_opt_interval.py index b125270a2..f9d24a72d 100644 --- a/plotUtils/simps/run_opt_interval.py +++ b/plotUtils/simps/run_opt_interval.py @@ -12,7 +12,9 @@ import copy import pickle -############################################################################################# +#======================================================================================================================================= +# FUNCTIONS +#======================================================================================================================================= def kLargestIntervals(list_of_energies, spectrumCDF = lambda x: x): """ Returns a list of the sizes of the K-largest intervals in that run according to the energy spectrum (given as a CDF). @@ -34,7 +36,15 @@ def kLargestIntervals(list_of_energies, spectrumCDF = lambda x: x): answer[interval_size] = np.max(gap_sizes) return answer -########################################################################################### +#======================================================================================================================================= +# INITIALIZE +#======================================================================================================================================= +# --outfilename: Specify output file name. +# --tenpct: If True run OIM on 10% data (or single hpstr vertex ana output tuple). +# --highPsum: If True, run OIM in high Psum (CR). +# --mpifpi: Ratio of dark pion mass to dark pion decay constant (benchmarks are 3 and 4pi). +# --signal_sf: Scale the signal. 
+# --nsigma: Size of the signal invariant mass search window (+-nsigma) import argparse parser = argparse.ArgumentParser(description='Process some inputs.') @@ -53,14 +63,16 @@ def kLargestIntervals(list_of_energies, spectrumCDF = lambda x: x): tenpct = args.tenpct print(f'Search Window Size: +-', nsigma) +#======================================================================================================================================= +# LOAD DATA +#======================================================================================================================================= #Initialize Signal Processor signalProcessor = simp_signal_2016.SignalProcessor(mpifpi=mpifpi, nsigma=nsigma) -#Load Data data = ak.Array([]) if args.tenpct: + # If tenpct True, run OIM on 10% data (or a single file) outfilename = f'{outfilename}_10pct' - #Load 10% data signal region inv_mass_range = (30,124) print('Loading 10% Data') branches = ["unc_vtx_mass","unc_vtx_psum", "unc_vtx_ele_track_z0", "unc_vtx_pos_track_z0", "unc_vtx_z", "unc_vtx_proj_sig"] @@ -70,23 +82,26 @@ def kLargestIntervals(list_of_energies, spectrumCDF = lambda x: x): data['weight'] = 1.0 else: + # Run OIM on 100% data (multiple files in a single directory) outfilename = f'{outfilename}_100pct' - #Load 100% data print('Loading 100% Data') inv_mass_range = (30,200) branches = ["unc_vtx_mass","unc_vtx_psum", "unc_vtx_ele_track_z0", "unc_vtx_pos_track_z0", "unc_vtx_z", "unc_vtx_proj_sig"] indir = '/fs/ddn/sdf/group/hps/users/alspellm/data_storage/pass4kf/pass4kf_ana_20240513' - #If high psum, can look at all masses + + # If highPsum is True, can look at all masses if args.highPsum: selection = 'vtxana_Tight_2016_simp_reach_CR' mass_safety = 'unc_vtx_mass*1000. >= 0' + + # If highPsum is False, can only look at 10% data before unblinding else: selection = 'vtxana_Tight_2016_simp_reach_SR' #mass_safety = 'unc_vtx_mass*1000. > 135' #CANT LOOK BELOW THIS MASS UNTIL UNBLINDING! - #inv_mass_range = (135,200) mass_safety = 'unc_vtx_mass*1000. > 0.0' #UNBLINDED! inv_mass_range = (30, 124) + # Loop over all input files and combine into single data array for filename in sorted(os.listdir(indir)): if not filename.endswith('.root'): continue @@ -97,18 +112,19 @@ def kLargestIntervals(list_of_energies, spectrumCDF = lambda x: x): data['weight'] = 1.0 -#Set the differential radiative trident rate lookup table used to scale expected signal +# Load the differential radiative trident rate lokup table. This scales the expected signal to the data print('Load lookup table') -cr_data = '/sdf/group/hps/user-data/alspellm/2016/data/hadd_BLPass4c_1959files.root' -full_lumi_path = '/fs/ddn/sdf/group/hps/users/alspellm/data_storage/pass4kf/pass4kf_ana_20240513' +cr_data = '/sdf/group/hps/user-data/alspellm/2016/data/hadd_BLPass4c_1959files.root' # If using 10% data. +full_lumi_path = '/fs/ddn/sdf/group/hps/users/alspellm/data_storage/pass4kf/pass4kf_ana_20240513' # If using 100% data. 
preselection = "vtxana_Tight_nocuts" signal_mass_range = [x for x in range(20,130,1)] signalProcessor.set_diff_prod_lut(cr_data, preselection, signal_mass_range, tenpct, full_lumi_path) -#Initialize the range of epsilon2 -#masses = [x for x in range(50,56,2)] +#======================================================================================================================================= +# INITIALIZE HISTOGRAMS +#======================================================================================================================================= + masses = [x for x in range(inv_mass_range[0], inv_mass_range[-1]+2,2)] -#masses = [x for x in range(68,100, 2)] ap_masses = [round(x*signalProcessor.mass_ratio_ap_to_vd,1) for x in masses] eps2_range = np.logspace(-4.0,-8.0,num=1000) logeps2_range = np.log10(eps2_range) @@ -116,7 +132,6 @@ def kLargestIntervals(list_of_energies, spectrumCDF = lambda x: x): max_eps = max(np.log10(eps2_range)) num_bins = len(eps2_range) -#make histos to store results exclusion_conf_h = ( hist.Hist.new .Reg(len(masses)-1, np.min(masses),np.max(masses),label='v_{D} Invariant Mass [MeV]') @@ -182,23 +197,25 @@ def kLargestIntervals(list_of_energies, spectrumCDF = lambda x: x): .Double() ) -####################################################################################################################################### +#======================================================================================================================================= +# RUN OPTIMUM INTERVAL METHOD +#======================================================================================================================================= -#Load lookup table -lookuptable_path = '/fs/ddn/sdf/group/hps/users/alspellm/mc_storage/opt_int_lookuptable_large.p' -lookuptable_path = '/sdf/home/a/alspellm/src/hpstr/plotUtils/simps/interval_ntrials_10000.p' -#lookuptable_path = '/fs/ddn/sdf/group/hps/users/alspellm/mc_storage/opt_int_lookuptable_max50_10ktoys.p' +# Load OIM lookup table generated using cmax.py lookuptable_path = '/fs/ddn/sdf/group/hps/users/alspellm/mc_storage/opt_int_lookuptable_max25_10ktoys_0.05steps_v2.p' -ntrials = 10000 #number of toy events thrown for each mu in lookup table + +# Number of toy events thrown for each mu in the loaded lookup table +ntrials = 10000 # This value is defined in cmax.py when the lookup table is generated. It MUST MATCH the value used. 
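# For orientation, the pickled object maps mu -> k -> a sorted array with one entry per toy: the size of the largest
# gap that contains exactly k events in a uniform Poisson(mu) pseudo-experiment on [0, 1]. A minimal sketch of how a
# table with that structure could be built is outlined below; it is only an illustration under those assumptions, the
# real table comes from cmax.py, and toy_k_largest_intervals / build_toy_lookup_table are hypothetical names, not
# functions in this repository.
#
#   def toy_k_largest_intervals(points, max_k=25):
#       pts = np.concatenate(([0.0], np.sort(points), [1.0]))
#       # largest interval containing exactly k events; 1.0 when the toy has fewer than k events
#       return {k: (np.max(pts[k+1:] - pts[:-(k+1)]) if len(pts) > k + 1 else 1.0) for k in range(max_k)}
#
#   def build_toy_lookup_table(mu_values, ntrials=10000, max_k=25, seed=0):
#       rng = np.random.default_rng(seed)
#       table = {}
#       for mu in mu_values:
#           gaps = {k: np.ones(ntrials) for k in range(max_k)}
#           for t in range(ntrials):
#               kints = toy_k_largest_intervals(rng.uniform(size=rng.poisson(mu)), max_k=max_k)
#               for k, gap in kints.items():
#                   gaps[k][t] = gap
#           table[mu] = {k: np.sort(v) for k, v in gaps.items()}
#       return table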
with open(lookuptable_path, 'rb') as f: - # Load the object from the pickle file lookupTable = pickle.load(f) -#open output file +# Open an output file to store the results outfile = uproot.recreate(f'{outfilename}.root') +# Run OIM for each MC generated signal mass (30-124 @ 2 MeV intervals) for signal_mass in masses: - #Histograms for each mass + + # Initialize histograms confidence_level_mass_h = ( hist.Hist.new .Reg(300, 0, 30.0,label='mu') @@ -214,17 +231,18 @@ def kLargestIntervals(list_of_energies, spectrumCDF = lambda x: x): print(f'Signal Mass {signal_mass}') - #Set signal window + # Define the invariant mass search window boundaries based on the search window size mass_low = signal_mass - signalProcessor.mass_resolution(signal_mass)*nsigma mass_high = signal_mass + signalProcessor.mass_resolution(signal_mass)*nsigma - #Build the selection for data - zcut_sel = signalProcessor.zcut_sel(data) - vprojsig_sel = signalProcessor.vprojsig_sel(data) - minz0_sel = signalProcessor.minz0_sel(data) - sameside_sel = signalProcessor.sameside_z0_cut(data) - masswindow_sel = signalProcessor.mass_sel(data, signal_mass) - #Set signal/control region + # Build the final selection used in the signal search and apply to the data + zcut_sel = signalProcessor.zcut_sel(data) # zcut on target position at -4.3 mm. + vprojsig_sel = signalProcessor.vprojsig_sel(data) # Require target projected vertex significance < 2.0 + minz0_sel = signalProcessor.minz0_sel(data) # Cut on minimum track vertical impact parameter z0 (aka y0) + sameside_sel = signalProcessor.sameside_z0_cut(data) # Cut events where both tracks have same side z0 + masswindow_sel = signalProcessor.mass_sel(data, signal_mass) # Define search window mass boundaries + + # Set the Psum selection if not args.tenpct and args.highPsum: psum_sel = signalProcessor.psum_sel(data, case='cr') elif args.tenpct and not args.highPsum: @@ -233,33 +251,39 @@ def kLargestIntervals(list_of_energies, spectrumCDF = lambda x: x): psum_sel = signalProcessor.psum_sel(data, case='cr') else: psum_sel = signalProcessor.psum_sel(data, case='sr') - print('UNBLINDED!') + + # Combine the selections and apply to data tight_sel = np.logical_and.reduce([zcut_sel, vprojsig_sel, sameside_sel, psum_sel, minz0_sel, masswindow_sel]) - #tight_sel = np.logical_and.reduce([zcut_sel, vprojsig_sel, psum_sel, masswindow_sel]) data_z = data[tight_sel].unc_vtx_z - print(data_z) - #Load MC Signal - #indir = '/sdf/group/hps/user-data/alspellm/2016/simp_mc/pass4b/beam/smeared' + #================================================================================================================================== + # LOAD MC SIGNAL + #================================================================================================================================== + indir = '/sdf/group/hps/user-data/alspellm/2016/simp_mc/pass4b/beam/smeared_fixbeamspot' + + # hpstr MC ana processor output file that stores the pre-readout MC signal truth information signal_pre_readout_path = lambda mass: f'/sdf/group/hps/user-data/alspellm/2016/simp_mc/pass4b/nobeam/mass_{mass}_simp_2pt3_slic_hadd_ana.root' + # hpstr vertex ana processor output tuple signal_path = lambda mass: f'{indir}/mass_{mass}_hadd-simp-beam_ana_smeared_corr.root' signal_selection = 'vtxana_radMatchTight_2016_simp_SR_analysis' - #Get the total signal yield as a function of eps2 + # Calculate the total A' production rate per epsilon^2 total_yield_per_epsilon2 = signalProcessor.total_signal_production_per_epsilon2(signal_mass) print('Total Yield 
Per eps2: ', total_yield_per_epsilon2) + # Load signal before tight selection print('Load Signal ', signal_path(signal_mass)) signal = signalProcessor.load_signal(signal_path(signal_mass), signal_pre_readout_path(signal_mass), signal_mass, signal_selection) - #Build the selection for signal + # Build final selection for signal zcut_sel = signalProcessor.zcut_sel(signal) vprojsig_sel = signalProcessor.vprojsig_sel(signal) minz0_sel = signalProcessor.minz0_sel(signal) sameside_sel = signalProcessor.sameside_z0_cut(signal) masswindow_sel = signalProcessor.mass_sel(signal, signal_mass) - #Set signal/control region + + # Set the Psum selection if not args.tenpct and args.highPsum: psum_sel = signalProcessor.psum_sel(signal, case='cr') elif args.tenpct and not args.highPsum: @@ -269,11 +293,14 @@ def kLargestIntervals(list_of_energies, spectrumCDF = lambda x: x): else: psum_sel = signalProcessor.psum_sel(signal, case='sr') print('UNBLINDED!') + + # Combine the selections and apply to MC signal tight_sel = np.logical_and.reduce([zcut_sel, vprojsig_sel, sameside_sel, psum_sel, minz0_sel, masswindow_sel]) signal = signal[tight_sel] - #Loop over eps2 values and reweight the signal - print('Looping over eps2') + #================================================================================================================================== + # CALCULATE UPPER LIMIT ON SIGNAL: As function of epsilon^2 + #================================================================================================================================== for i, eps2 in enumerate(eps2_range): signal = signalProcessor.get_exp_sig_eps2(signal_mass, signal, eps2) total_yield = ak.sum(signal['reweighted_accxEff'])*total_yield_per_epsilon2*eps2 @@ -281,7 +308,8 @@ def kLargestIntervals(list_of_energies, spectrumCDF = lambda x: x): print(f'eps2 = {eps2}') print(total_yield) - #Make signal efficiency in recon z + # Signal acceptance*efficiency*dark_vector_probability in reconstructed vertex z. + # This represents the shape of the signal in 1D. exp_sig_eff_z = ( hist.Hist.new .Reg(140, -40.0,100.0,label=r'Recon z [mm]') @@ -289,70 +317,74 @@ def kLargestIntervals(list_of_energies, spectrumCDF = lambda x: x): ) exp_sig_eff_z.fill(signal.unc_vtx_z, weight=signal.reweighted_accxEff*total_yield_per_epsilon2*eps2) - #Convert the data to a uniform distribution in recon z, according to the expected signal distribution + # Convert the remaining events in data to a normalized uniform distribution in reconstructed vertex z according to signal shape. data_uniform_z = ( hist.Hist.new .Reg(101, -0.005,1.005,label=r'Recon z [mm]') .Double() ) + # Initialize an array to store the new data events that are transformed according to the signal shape. + # Add endpoints to the new uniform data array, 0 in front, and 1.0 and the end. 
dataArray = np.zeros(len(data_z)+2) dataArray[0] = 0.0 for k in range (0, len(data_z)): thisX = data_z[k] - dataArray[k+1] = total_yield - exp_sig_eff_z[hist.loc(thisX)::sum] + dataArray[k+1] = total_yield - exp_sig_eff_z[hist.loc(thisX)::sum] #transformation dataArray[len(data_z)+1] = total_yield - dataArray = dataArray/total_yield + dataArray = dataArray/total_yield # normalize distribution based on total signal rate dataArray = np.nan_to_num(dataArray, nan=1.0) dataArray[0] = 0.0 dataArray.sort() data_uniform_z.fill(dataArray) + # Calculate maximum gaps with k events allowed between events kints = kLargestIntervals(dataArray) - #Loop through lookup table to find confidence level + # Loop through the lookup table to find what upper limit on the signal rate results in 90% confidence mu_90p = 99999.9 k_90p = -1 conf_90p = -1.0 previous_mu = 999999.9 previous_conf = -9.9 + # Loop over values of mu (mean expected signal rate) for i,mu in enumerate(sorted(lookupTable.keys())): best_k = -1 - best_conf = -1.0 + best_conf = -1.0 # Store best confidence level across all values of k + + # Loop over all values of k (k events allowed in gap between data events) for k in sorted(lookupTable[mu].keys()): if k > len(kints)-1: break x = np.max(kints[k]) - conf = np.where(lookupTable[mu][k] < x)[0].size / (ntrials) + conf = np.where(lookupTable[mu][k] < x)[0].size / (ntrials) # Confidence level for this mu and k if conf > best_conf: best_k = k best_conf = conf - #debug histos + # Debug histos confidence_level_mass_h.fill(mu, np.log10(eps2), weight=best_conf) best_kvalue_mass_h.fill(mu, np.log10(eps2), weight=best_k) - #if the confidence is >= 90%, this is the upper limit + # If the condience level is >= 90%, this value of mu is the upper limit if best_conf >= 0.9: mu_90p = mu k_90p = best_k conf_90p = best_conf - #print(f'90% confidence upper limit on mu={mu_90p}, when k={k_90p}') - #print(f'Confidence level: ', conf_90p) - #fill debug histo. Check excluded signal value right before upper limit + # Fill debug histos excluded_signal_minus1_h.fill(signal_mass, np.log10(eps2), weight=previous_mu) exclusion_conf_minus1_h.fill(signal_mass, np.log10(eps2), weight=previous_conf) break - #debug. 
Track values just before upper limit is reached + # More debug previous_mu = mu previous_conf = best_conf - #Fill histogram results + # Fill OIM results in histograms exclusion_conf_h.fill(signal_mass, np.log10(eps2), weight=conf_90p) exclusion_bestk_h.fill(signal_mass, np.log10(eps2), weight=k_90p) total_yield_h.fill(signal_mass, np.log10(eps2), weight=total_yield) @@ -363,10 +395,11 @@ def kLargestIntervals(list_of_energies, spectrumCDF = lambda x: x): excluded_signal_ap_h.fill(signalProcessor.mass_ratio_ap_to_vd*signal_mass, np.log10(eps2), weight=mu_90p) sensitivity_ap_h.fill(signalProcessor.mass_ratio_ap_to_vd*signal_mass, np.log10(eps2), weight=(total_yield/mu_90p)) - #save mass histograms + # Save mass dependent histograms outfile[f'masses/confidence_levels_{signal_mass}_h'] = confidence_level_mass_h outfile[f'masses/best_kvalues_{signal_mass}_h'] = best_kvalue_mass_h +# Save results across all masses outfile['total_yield_h'] = total_yield_h outfile['excluded_signal_h'] = excluded_signal_h outfile['sensitivity_h'] = sensitivity_h @@ -377,7 +410,7 @@ def kLargestIntervals(list_of_energies, spectrumCDF = lambda x: x): outfile['excluded_signal_ap_h'] = excluded_signal_ap_h outfile['sensitivity_ap_h'] = sensitivity_ap_h -#save debug plots +# Save debug plots outfile['excluded_signal_minus1_h'] = excluded_signal_minus1_h outfile['exclusion_conf_minus1_h'] = exclusion_conf_minus1_h diff --git a/plotUtils/simps/run_signal_search.py b/plotUtils/simps/run_signal_search.py index 1558ef88d..1e75aa4ee 100644 --- a/plotUtils/simps/run_signal_search.py +++ b/plotUtils/simps/run_signal_search.py @@ -196,11 +196,6 @@ def run_abcd_method(data, signal_mass): minz0_coeffs = signalProcessor.get_minz0_cut() min_z0_cut = signalProcessor.polynomial(minz0_coeffs[0],minz0_coeffs[1],minz0_coeffs[2])(signal_mass) - - #Determine the min z0 cut floor. The ratio of potential signal to background in region C should be so small - #as to be negligible, or else the expected background in region F will be overestimated due to signal contamination in C, - #and our ability to make a discovery will be dramatically reduced. - # Define the minimum z0 floor used to count events in regions B, C, and D. # This floor is defined so that the background estimate is weighted towards the tails of the minimum z0 distributions rather # than the core of the distribution. However, the tails cannot be so small that signal contamination is an issue in C. 
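For reference, the estimate that run_abcd_method is built around is the standard ABCD relation: if the two discriminating variables are approximately uncorrelated for background, the background expected in the signal region can be predicted from sideband counts as N_sig ~ N_B * N_C / N_D. The sketch below is only a schematic illustration of that relation under the independence assumption; the actual region layout in this analysis (the regions B, C, D, and F referenced above, together with the minimum z0 floor) and the toy-based p-value machinery live in run_abcd_method and get_t0, and abcd_estimate is a hypothetical helper name.

import numpy as np

def abcd_estimate(n_b, n_c, n_d):
    # Schematic ABCD prediction for the signal-region background, assuming the two
    # cut variables are uncorrelated for background events.
    if n_d == 0:
        return 0.0, 0.0
    estimate = n_b * n_c / n_d
    # Relative Poisson uncertainties of the three sideband counts added in quadrature.
    rel_err = np.sqrt(sum(1.0 / n for n in (n_b, n_c, n_d) if n > 0))
    return estimate, estimate * rel_err

# Example: abcd_estimate(12, 40, 95) gives roughly (5.1, 1.7).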
diff --git a/plotUtils/simps/simp_signal_2016.py b/plotUtils/simps/simp_signal_2016.py index 99d0722e7..955d10584 100644 --- a/plotUtils/simps/simp_signal_2016.py +++ b/plotUtils/simps/simp_signal_2016.py @@ -814,35 +814,30 @@ def inject_signal_mc(signal, data, nevents=100): outfilename = args.outfilename tenpct = args.tenpct - - #Create MC signal analysis tuple processor - print('Initialize signal processor') + # Initialize signal processor processor = SignalProcessor(mpifpi=mpifpi, nsigma=nsigma) - #Set the differential radiative trident rate lookup table used to scale expected signal - print('Load lookup table') + # Load either 10% data or 100% data, use the CR reconstructed bkg rate to scale the expected signal rate cr_data = '/sdf/group/hps/user-data/alspellm/2016/data/hadd_BLPass4c_1959files.root' full_lumi_path = '/fs/ddn/sdf/group/hps/users/alspellm/data_storage/pass4kf/pass4kf_ana_20240513' preselection = "vtxana_Tight_nocuts" signal_mass_range = [x for x in range(30,130,1)] processor.set_diff_prod_lut(cr_data, preselection, signal_mass_range, tenpct, full_lumi_path) - #Initialize the range of epsilon2 - mass_max = 50 + # Initialize the mass and epsilon^2 range for the expected signal calculation + mass_max = 124 mass_min = 30 - mass_step = 2 + mass_step = 2 # MC signal files were generated at 2 MeV increments ap_step = round(mass_step*processor.mass_ratio_ap_to_vd,1) masses = np.array([x for x in range(mass_min, mass_max+mass_step, mass_step)]) ap_masses = np.array([round(x*processor.mass_ratio_ap_to_vd,1) for x in masses]) - print(masses) - print(ap_masses) eps2_range = np.logspace(-4.0,-8.0,num=40) logeps2_range = np.log10(eps2_range) min_eps = min(np.log10(eps2_range)) max_eps = max(np.log10(eps2_range)) num_bins = len(eps2_range) - #Define all histograms + # Initialize the histograms used to store the expected signal. One for Vd mass, one for Ap mass. 
expected_signal_vd_h = ( .Reg(len(masses), np.min(masses), np.max(masses)+mass_step, label='Vd Invariant Mass [MeV]') @@ -856,6 +851,7 @@ def inject_signal_mc(signal, data, nevents=100): .Double() ) + # Calculate expected signal for each MC generated mass for signal_mass in masses: #Load MC Signal @@ -877,14 +873,15 @@ def inject_signal_mc(signal, data, nevents=100): vprojsig_sel = processor.vprojsig_sel(signal) minz0_sel = processor.minz0_sel(signal) masswindow_sel = processor.mass_sel(signal, signal_mass) - sameside_sel = processor.sameside_z0_cut(signal) + #sameside_sel = processor.sameside_z0_cut(signal) + # Combine selections - tight_sel = np.logical_and.reduce([psum_sel,zcut_sel, vprojsig_sel, psum_sel, minz0_sel, masswindow_sel, sameside_sel]) + #tight_sel = np.logical_and.reduce([psum_sel,zcut_sel, vprojsig_sel, psum_sel, minz0_sel, masswindow_sel, sameside_sel]) + tight_sel = np.logical_and.reduce([psum_sel,zcut_sel, vprojsig_sel, psum_sel, minz0_sel, masswindow_sel]) for l, eps2 in enumerate(eps2_range): signal = processor.get_exp_sig_eps2(signal_mass, signal, eps2) total_yield = signal_sf*ak.sum(signal['reweighted_accxEff'][tight_sel])*total_yield_per_epsilon2*eps2 - #print('Total Yield: ', total_yield) expected_signal_vd_h.fill(signal_mass, logeps2_range[l], weight=total_yield) expected_signal_ap_h.fill(signal_mass*processor.mass_ratio_ap_to_vd, logeps2_range[l], weight=total_yield) From d991e5c4372f5f73a23305464b2863dd8e8511a7 Mon Sep 17 00:00:00 2001 From: Alic Shen Spellman Date: Mon, 9 Sep 2024 13:39:49 -0700 Subject: [PATCH 25/27] more documentation --- plotUtils/simps/run_opt_interval.py | 14 ++ .../simps/systematics/minz0_systematic.py | 101 +++++--- ...e_acceptance_target_position_systematic.py | 56 +++-- .../signal_target_position_systematic.py | 203 ++---------------- .../simps/systematics/v0projsig_systematic.py | 39 ++-- 5 files changed, 148 insertions(+), 265 deletions(-) diff --git a/plotUtils/simps/run_opt_interval.py b/plotUtils/simps/run_opt_interval.py index f9d24a72d..86349c19c 100644 --- a/plotUtils/simps/run_opt_interval.py +++ b/plotUtils/simps/run_opt_interval.py @@ -1,4 +1,18 @@ #!/usr/bin/python3 +""" +This script runs the optimum interval method to calculate a 90% confidence upper limit on the SIMP signal rate as a function of +mass and epsilon. + +Load in the data (in the form of the flat tuple output by the hpstr vertex analysis processor) and apply all selection criteria. + *The vertex ana processor applies Preselection, and a few Tight cuts, but you need to apply all remaining tight cuts. +Load in the MC signal (for each generated mass), and apply all selection criteria. + +The reconstructed vertex z distribution of the remaining data events is transformed into a normalized uniform distribution according +to the expected MC signal shape in reconstructed vertex z. Since this shape is a function of epsilon^2, the upper limit depends on both +mass and epsilon^2. + +Calculating the upper limit here requires an external lookup table that is generated using cmax.py. 
+""" import os import awkward as ak import numpy as np diff --git a/plotUtils/simps/systematics/minz0_systematic.py b/plotUtils/simps/systematics/minz0_systematic.py index 7fb84ec02..5027b2967 100644 --- a/plotUtils/simps/systematics/minz0_systematic.py +++ b/plotUtils/simps/systematics/minz0_systematic.py @@ -1,4 +1,7 @@ #!/usr/bin/python3 +#======================================================================================================================================= +# Description: Calculates the systematic uncertainty associated with the minimum z0 (aka y0) cut. +# Systematics calculated using MC signal generated at 4 MeV intervals import os import awkward as ak import numpy as np @@ -14,20 +17,19 @@ import sys import math +# SIMP tools defined in hpstr hpstr_base = os.getenv('HPSTR_BASE') sys.path.append(f'{hpstr_base}/plotUtils/simps') import simp_signal_2016 -#format mpl plots -plt.rcParams.update({'font.size': 40, # Font size for text - 'axes.titlesize': 40, # Font size for titles - 'axes.labelsize': 40, # Font size for axis labels - 'xtick.labelsize': 40, # Font size for x-axis tick labels - 'ytick.labelsize': 40, # Font size for y-axis tick labels - 'lines.linewidth':3.0, - 'legend.fontsize': 40}) # Font size for legend -plt.rcParams['font.family'] = 'DejaVu Sans' +#======================================================================================================================================= +# INITIALIZATION +#======================================================================================================================================= +# Set plotting parameters for matplotlib +plt.rcParams.update({'font.size': 40, 'axes.titlesize': 40, 'axes.labelsize': 40, 'xtick.labelsize': 40, 'ytick.labelsize': 40, 'lines.linewidth': 3.0, 'legend.fontsize': 40}) +plt.rcParams['font.family'] = 'DejaVu Sans' +# parse input arguments import argparse parser = argparse.ArgumentParser(description='') parser.add_argument('--outdir', type=str, default='./search_results') @@ -35,7 +37,10 @@ args = parser.parse_args() outdir = args.outdir -################################################################################################################################ + +#======================================================================================================================================= +# FUNCTIONS +#======================================================================================================================================= def get_rand(x=False): if x: return np.random.uniform(80,120)*0.01 @@ -135,47 +140,52 @@ def cnv_root_to_np(histo): y_fit = np.array([fitfunc.Eval(x) for x in x_fit]) return (xvals, yvals, errors), (x_fit, y_fit) +#======================================================================================================================================= +# LOAD DATA +#======================================================================================================================================= -################################################################################################################################### - -#Load signal processor +# Initialize signal processor search_window = 1.5 #used in final search signalProcessor = simp_signal_2016.SignalProcessor(args.mpifpi, search_window) -#Read in data, MC, and signal +# Load data, MC background, and signal samples = {} branches = ["unc_vtx_ele_track_z0","unc_vtx_pos_track_z0"] -#Read 10% Data +# Load 10% Data infile = 
'/sdf/group/hps/user-data/alspellm/2016/data/hadd_BLPass4c_1959files.root' selection = 'vtxana_Tight_L1L1_nvtx1' samples['data'] = signalProcessor.load_data(infile,selection, expressions=branches, cut_expression='((unc_vtx_psum > 1.0) & (unc_vtx_psum < 1.9) )') samples['data']['weight'] = 1.0 #Assign weight of 10 to scale up to full lumi -#Load MC background +# Load MC background lumi = 10.7*.1 #pb-1 mc_scale = {'data' : 1.0, 'tritrig' : 1.416e9*lumi/(50000*10000), 'wab' : 0.1985e12*lumi/(100000*10000)} -#tritrig +# Load MC tritrig infile = '/sdf/group/hps/user-data/alspellm/2016/tritrig_mc/pass4b/hadded_tritrig-beam-10kfiles-ana-smeared-corr_beamspotfix.root' samples['tritrig'] = signalProcessor.load_data(infile, selection, cut_expression='((unc_vtx_psum > 1.0) & (unc_vtx_psum < 1.9) )', expressions=branches) samples['tritrig']['weight'] = mc_scale['tritrig'] -#wab +# Load MC wab infile = '/sdf/group/hps/user-data/alspellm/2016/wab_mc/pass4b/hadded_wab-beam-10kfiles-ana-smeared-corr_beamspotfix.root' samples['wab'] = signalProcessor.load_data(infile, selection, cut_expression='((unc_vtx_psum > 1.0) & (unc_vtx_psum < 1.9) )', expressions=branches) samples['wab']['weight'] = mc_scale['wab'] -#After smearing factor has been calculated, set to true to compare z0 distributions with smeared MC background +#======================================================================================================================================= +# Check smearing after the fact +#======================================================================================================================================= + +# After the smearing factor has been calculated, set to True to compare data and smeared MC background smear = True if smear: - mc_sigma = 0.1251 - data_sigma = 0.1348 + mc_sigma = 0.1251 # Calculated in this script + data_sigma = 0.1348 # Calculated in this script smearF = np.sqrt(data_sigma**2 - mc_sigma**2) - #smear tritrig + # Smear MC tritrig rel_smear = np.random.normal(0.0, 1.0, len(samples['tritrig'].unc_vtx_min_z0)) smearfactors = rel_smear*smearF samples['tritrig']['unc_vtx_ele_track_z0'] = smearfactors + samples['tritrig']['unc_vtx_ele_track_z0'] @@ -183,7 +193,7 @@ def cnv_root_to_np(histo): smearfactors = rel_smear*smearF samples['tritrig']['unc_vtx_pos_track_z0'] = smearfactors + samples['tritrig']['unc_vtx_pos_track_z0'] - #smear wab + # Smear MC wab rel_smear = np.random.normal(0.0, 1.0, len(samples['wab'].unc_vtx_min_z0)) smearfactors = rel_smear*smearF samples['wab']['unc_vtx_ele_track_z0'] = smearfactors + samples['wab']['unc_vtx_ele_track_z0'] @@ -191,8 +201,11 @@ def cnv_root_to_np(histo): smearfactors = rel_smear*smearF samples['wab']['unc_vtx_pos_track_z0'] = smearfactors + samples['wab']['unc_vtx_pos_track_z0'] +#======================================================================================================================================= +# Calculate z0 width in data and MC background +#======================================================================================================================================= -#Plot z0 for data and MC backgrounds +# Initialize z0 histograms z0_h = ( hist.Hist.new .StrCategory(list(samples.keys()), name='samples') @@ -200,23 +213,26 @@ def cnv_root_to_np(histo): .Double() ) -#Fill without weights, so that histos can be converted to ROOT and retain statistical uncertainty +# Fill without weights, so that histos can be converted to ROOT and retain statistical uncertainty z0_histos = {} for sname, sample in 
samples.items(): - z0_h.fill(sname, sample.unc_vtx_ele_track_z0)#, weight=sample.weight/ak.sum(sample.weight)) - z0_h.fill(sname, sample.unc_vtx_pos_track_z0)#, weight=sample.weight/ak.sum(sample.weight)) - z0_histos[sname] = signalProcessor.cnvHistoToROOT(z0_h[sname,:]) + z0_h.fill(sname, sample.unc_vtx_ele_track_z0) + z0_h.fill(sname, sample.unc_vtx_pos_track_z0) + z0_histos[sname] = signalProcessor.cnvHistoToROOT(z0_h[sname,:]) # Convert hist histogram to ROOT z0_histos[sname].Scale(mc_scale[sname]) -#Scale Tritrig and WAB and combine with proper errors +# Scale Tritrig and WAB and combine with proper errors z0_histos['tritrig_wab'] = z0_histos['tritrig'].Clone() z0_histos['tritrig_wab'].Add(z0_histos['wab']) -#Normalize + +# Normalize histograms for sname, sample in z0_histos.items(): print(z0_histos[sname].Integral(0,-1)) z0_histos[sname].Scale(1./z0_histos[sname].Integral(0,-1)) -#Make plots of data vs MC background +# Plot data and MC background z0 distributions +# Fit each with Gaussian to determine z0 width +# MC width narrower than data. Smear MC background to match data fig, ax = plt.subplots(2,1, figsize=(25,30)) #Data plt.subplot(2,1,1) @@ -241,27 +257,30 @@ def cnv_root_to_np(histo): plt.ylim(0.0, 0.03) plt.savefig(f'{outdir}/impact_parameter_data_v_mc_smeared_{smear}.png') -####################################################################################################################################### +#smearing factors calculated from comparing data and MC bkg z0 widths +mc_sigma = 0.1251 +data_sigma = 0.1348 +smearF = np.sqrt(data_sigma**2 - mc_sigma**2) + +#======================================================================================================================================= +# Smear MC signal and calculate change in signal efficiency +#======================================================================================================================================= -#Smear the signal using ratio of data and MC widths sysvals = [] masses = [] +# Directory containing MC signal hpstr vertex ana processor tuples indir = '/sdf/group/hps/user-data/alspellm/2016/simp_mc/pass4b/beam/smeared_fixbeamspot' for mass in range(30,120,4): masses.append(mass) + # MC signal hpstr MC ana processor (truth vertex z information) signal_pre_readout_path = lambda mass: f'/sdf/group/hps/user-data/alspellm/2016/simp_mc/pass4b/nobeam/mass_{mass}_simp_2pt3_slic_hadd_ana.root' signal_path = lambda mass: f'{indir}/mass_{mass}_hadd-simp-beam_ana_smeared_corr.root' signal_selection = 'vtxana_radMatchTight_2016_simp_SR_analysis' signal = signalProcessor.load_signal(signal_path(signal_mass), signal_pre_readout_path(signal_mass), signal_mass, signal_selection) signal['weight']=1.0 - psum_sel = signalProcessor.psum_sel(signal, case='sr') - - #smearing factors calculated from comparing data and MC bkg z0 widths - mc_sigma = 0.1251 - data_sigma = 0.1348 - smearF = np.sqrt(data_sigma**2 - mc_sigma**2) + psum_sel = signalProcessor.psum_sel(signal, case='sr') #Psum signal region selection - #smear signal minz0 + # Smear electron and positron track z0 rel_smear = np.random.normal(0.0, 1.0, len(signal.unc_vtx_min_z0)) smearfactors = rel_smear*smearF signal['unc_vtx_ele_track_z0_smeared'] = smearfactors + signal['unc_vtx_ele_track_z0'] @@ -270,7 +289,7 @@ def cnv_root_to_np(histo): smearfactors = rel_smear*smearF signal['unc_vtx_pos_track_z0_smeared'] = smearfactors + signal['unc_vtx_pos_track_z0'] - #calculate smeared minz0 + # Calculate smeared minz0 signal['unc_vtx_min_z0_smeared'] = 
np.minimum(abs(signal['unc_vtx_ele_track_z0_smeared']), abs(signal['unc_vtx_pos_track_z0_smeared'])) #Calculate change in efficiency diff --git a/plotUtils/simps/systematics/radiative_acceptance_target_position_systematic.py b/plotUtils/simps/systematics/radiative_acceptance_target_position_systematic.py index 854e069ba..e47ce397d 100644 --- a/plotUtils/simps/systematics/radiative_acceptance_target_position_systematic.py +++ b/plotUtils/simps/systematics/radiative_acceptance_target_position_systematic.py @@ -1,4 +1,9 @@ #!/usr/bin/python3 +#======================================================================================================================================= +# Description: Calculates systematic uncertainty associated with target position uncertainty (0.5 mm) +# related to radiative trident acceptance. +# MC radiative tridents+beam at nominal, -0.5 mm, and +0.5 mm + import os import awkward as ak import numpy as np @@ -14,20 +19,19 @@ import sys import math +# SIMP tools in hpstr hpstr_base = os.getenv('HPSTR_BASE') sys.path.append(f'{hpstr_base}/plotUtils/simps') import simp_signal_2016 -#format mpl plots -plt.rcParams.update({'font.size': 40, # Font size for text - 'axes.titlesize': 40, # Font size for titles - 'axes.labelsize': 40, # Font size for axis labels - 'xtick.labelsize': 40, # Font size for x-axis tick labels - 'ytick.labelsize': 40, # Font size for y-axis tick labels - 'lines.linewidth':3.0, - 'legend.fontsize': 40}) # Font size for legend +#====================================================================================================================================== +#INITIALIZATION +#======================================================================================================================================= +# Set plotting parameters for matplotlib +plt.rcParams.update({'font.size': 40, 'axes.titlesize': 40, 'axes.labelsize': 40, 'xtick.labelsize': 40, 'ytick.labelsize': 40, 'lines.linewidth': 3.0, 'legend.fontsize': 40}) plt.rcParams['font.family'] = 'DejaVu Sans' +#parse input arguments import argparse parser = argparse.ArgumentParser(description='') parser.add_argument('--outdir', type=str, default='./search_results') @@ -35,16 +39,21 @@ args = parser.parse_args() outdir = args.outdir -################################################################################################################################ + +#======================================================================================================================================= +# LOAD MC RADIATIVE TRIDENTS FOR EACH TARGET POSITION +#======================================================================================================================================= + +# Invariant mass search window size search_window = 1.5 +# Initialize signal processor that contains everything needed to calculate expected signal signalProcessor = simp_signal_2016.SignalProcessor(args.mpifpi, search_window) samples = {} mcsamples = {} branches = ["unc_vtx_mass"] -#LOAD NOMINAL RAD + BEAM -#rad+beam +# Load nominal radiative tridents + beam MC infile = '/sdf/group/hps/user-data/alspellm/2016/rad_mc/pass4b/rad_beam/rad-beam-hadd-10kfiles-ana-smeared-corr.root' selection = 'vtxana_radMatchTight_nocuts' samples['nominal_beam'] = signalProcessor.load_data(infile, selection, cut_expression='((unc_vtx_psum > 1.9) & (unc_vtx_psum < 2.4) )', expressions=branches) @@ -54,30 +63,31 @@ mcsamples['nominal_beam'] = copy.deepcopy(slicfile.Get('mcAna/mcAna_mc622Mass_h')) slicfile.Close() -#LOAD 
NOMINAL RAD + BEAM Mpt5 -#rad+beam +# Load nominal-0.5 mm radiative tridents + beam MC infile = '/sdf/group/hps/user-data/alspellm/2016/systematics/radacc/hadd_1937files_rad_beam_targetz_Mpt5_recon_ana.root' selection = 'vtxana_radMatchTight_nocuts' #USE RADMATCHTIGHT! samples['targetz_Mpt5_beam'] = signalProcessor.load_data(infile, selection, cut_expression='((unc_vtx_psum > 1.9) & (unc_vtx_psum < 2.4) )', expressions=branches) -#mc ana +# Load radiative trident mc ana (generated rate) infile = '/sdf/group/hps/user-data/alspellm/2016/systematics/radacc/hadd_1937files_rad_beam_targetz_Mpt5_mc_ana.root' slicfile = r.TFile(infile, "READ") mcsamples['targetz_Mpt5_beam'] = copy.deepcopy(slicfile.Get('mcAna/mcAna_mc622Mass_h')) slicfile.Close() -#LOAD NOMINAL RAD + BEAM Ppt5 -#rad+beam +# Load nominal+0.5 mm radiative tridents + beam MC infile = '/sdf/group/hps/user-data/alspellm/2016/systematics/radacc/hadd_1937files_rad_beam_targetz_Ppt5_recon_ana.root' selection = 'vtxana_radMatchTight_nocuts' #USE RADMATCHTIGHT! samples['targetz_Ppt5_beam'] = signalProcessor.load_data(infile, selection, cut_expression='((unc_vtx_psum > 1.9) & (unc_vtx_psum < 2.4) )', expressions=branches) -#mc ana +# Load radiative trident mc ana (generated rate) infile = '/sdf/group/hps/user-data/alspellm/2016/systematics/radacc/hadd_1937files_rad_beam_targetz_Ppt5_mc_ana.root' slicfile = r.TFile(infile, "READ") mcsamples['targetz_Ppt5_beam'] = copy.deepcopy(slicfile.Get('mcAna/mcAna_mc622Mass_h')) slicfile.Close() +#======================================================================================================================================= +# CALCULATE RADIATIVE TRIDENT ACCEPTANCE +#======================================================================================================================================= -#init invariant mass plot +# Initialize histogram used to calculate radiative trident acceptance nbinsx = mcsamples['nominal_beam'].GetNbinsX() first_bin = mcsamples['nominal_beam'].GetBinLowEdge(1) last_bin = nbinsx*mcsamples['nominal_beam'].GetBinWidth(1) @@ -88,7 +98,7 @@ .Double() ) -#Fill without weights, so that histos can be converted to ROOT and retain statistical uncertainty +# Fill without weights, so that histos can be converted to ROOT and retain statistical uncertainty invmass_histos = {} for sname, sample in samples.items(): invmass_h.fill(sname, sample.unc_vtx_mass*1000.) 
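# The "fill without weights" pattern above (and in the other systematics scripts) relies on a hist-to-ROOT
# conversion such as signalProcessor.cnvHistoToROOT, so that the resulting TH1 carries sqrt(N) statistical
# errors before any scaling. A sketch of what such a conversion might look like for a 1D histogram; this is
# an illustrative stand-in, not the helper's actual implementation:
import numpy as np
import ROOT as r

def cnv_hist_to_root_sketch(h, name='h_root'):
    values = h.values()
    edges = h.axes[0].edges
    th1 = r.TH1F(name, name, len(values), float(edges[0]), float(edges[-1]))
    for i, v in enumerate(values):
        th1.SetBinContent(i + 1, float(v))
        th1.SetBinError(i + 1, float(np.sqrt(v)))  # Poisson error, only valid for unweighted fills
    return th1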
@@ -115,7 +125,7 @@ def nonUniBinning(histo, start, size): # mcsamples[sname] = nonUniBinning(mcsamples[sname], 150, 4) -#calculate radiative acceptance +# Calculate radiative trident acceptance for each case (nominal, -0.5 mm, +0.5 mm) fits = {} colors = ['#d62728', '#bcbd22', '#2ca02c', '#17becf', '#1f77b4', '#9467bd', '#7f7f7f'] colors = ['black', 'darkred', 'darkblue', 'darkgreen', 'darkorange'] @@ -125,8 +135,10 @@ def nonUniBinning(histo, start, size): plt.ylabel('Radiative Acceptance') labels = ['Nominal (-4.3 mm)', '-4.8 mm', '-3.8 mm'] for i,(sname, histo) in enumerate(invmass_histos.items()): - ratio = invmass_histos[sname].Clone() - ratio.Divide(mcsamples[sname]) + ratio = invmass_histos[sname].Clone() + ratio.Divide(mcsamples[sname]) # radiative trident acceptance + + # Fit the radiative trident acceptance ratio with a polynomial fit function fit_params,_ = signalProcessor.fit_plot_with_poly(ratio, specify_n=7, set_xrange=True, xrange=(30.0, 220.0)) print(sname, fit_params) (xvals, yvals, errors), (x_fit, y_fit) = signalProcessor.cnv_root_to_np(ratio) diff --git a/plotUtils/simps/systematics/signal_target_position_systematic.py b/plotUtils/simps/systematics/signal_target_position_systematic.py index 3d9c16dbd..116b870f7 100644 --- a/plotUtils/simps/systematics/signal_target_position_systematic.py +++ b/plotUtils/simps/systematics/signal_target_position_systematic.py @@ -1,8 +1,8 @@ -#!/usr/bin/env python -# coding: utf-8 - -# In[76]: - +#!/usr/bin/python3 +#======================================================================================================================================= +# Description: Calculates systematic uncertainty associated with target position uncertainty (0.5 mm) +# related to MC signal acceptance +# MC signal (NO beam) at nominal, -0.5 mm, and +0.5 mm import os import awkward as ak @@ -12,189 +12,45 @@ import uproot import ROOT as r import copy - import matplotlib.pyplot as plt import matplotlib as mpl import mplhep import matplotlib.gridspec as gridspec - import sys -sys.path.append('/sdf/group/hps/user-data/alspellm/2016/plotting') -import hps_plot_utils as utils - -get_ipython().run_line_magic('matplotlib', 'inline') -mpl.style.use(mplhep.style.ROOT) import math -import pickle -sys.path.append('/sdf/home/a/alspellm/src/hpstr_v62208/plotUtils/simps') +# SIMP tools in hpstr +hpstr_base = os.getenv('HPSTR_BASE') +sys.path.append(f'{hpstr_base}/plotUtils/simps') import simp_signal_2016 -from simp_theory_equations import SimpEquations as simpeqs -import copy -# Set global font sizes -plt.rcParams.update({'font.size': 40, # Font size for text - 'axes.titlesize': 40, # Font size for titles - 'axes.labelsize': 40, # Font size for axis labels - 'xtick.labelsize': 40, # Font size for x-axis tick labels - 'ytick.labelsize': 40, # Font size for y-axis tick labels - 'lines.linewidth':3.0, - 'legend.fontsize': 40}) # Font size for legend -plt.rcParams['font.family'] = 'DejaVu Sans' - - -# In[2]: - - -samples = {} -branches = ['unc_vtx_ele_track_z0','unc_vtx_pos_track_z0', 'unc_vtx_z', 'unc_vtx_mass', 'unc_vtx_proj_sig'] -infile = '/sdf/group/hps/user-data/alspellm/2016/systematics/simps/target_unc/Mpt5/hadd_simp_mass_60_nobeam_target_Mpt5_recon_ana.root' -infile = '/sdf/group/hps/user-data/alspellm/2016/systematics/simps/target_unc/Mpt5/hadd_simp_mass_60_nobeam_target_Mpt5_recon_ana.root' -infile = '/sdf/group/hps/user-data/alspellm/2016/systematics/simp_target_unc/nov0proj/nominal.root' -signalProcessor = simp_signal_2016.SignalProcessor(np.pi*4., 1.5) 
-selection = 'vtxana_radMatchTight_2016_simp_SR_analysis' #USE RADMATCHTIGHT! -samples['Mpt5'] = signalProcessor.load_data(infile, selection, cut_expression='((unc_vtx_psum > 1.0) & (unc_vtx_psum < 1.9) )', expressions=branches) -samples['Mpt5']['weight'] = 1.0 - -infile = '/sdf/group/hps/user-data/alspellm/2016/systematics/simps/nominal/hadd_simp_mass_60_nobeam_nominal_recon_ana.root' -signalProcessor = simp_signal_2016.SignalProcessor(np.pi*4., 1.5) -samples['nominal'] = signalProcessor.load_data(infile, selection, cut_expression='((unc_vtx_psum > 1.0) & (unc_vtx_psum < 1.9) )', expressions=branches) -samples['nominal']['weight'] = 1.0 - - -# In[ ]: - - -z0_h = ( - hist.Hist.new - .StrCategory(list(samples.keys()), name='samples') - .Reg(60,-3.0, 3.0,label='Track y0 [mm]') - .Double() -) -for sname, sample in samples.items(): - z0_h.fill(sname, samples[sname].unc_vtx_ele_track_z0) - z0_h.fill(sname, samples[sname].unc_vtx_pos_track_z0) -fig, ax = plt.subplots(figsize=(20,10)) -z0_h.plot() -plt.legend() - -miny0_h = ( - hist.Hist.new - .StrCategory(list(samples.keys()), name='samples') - .Reg(60,0, 3.0,label='Min y0 [mm]') - .Double() -) -for sname, sample in samples.items(): - miny0_h.fill(sname, samples[sname].unc_vtx_min_z0) - -fig, ax = plt.subplots(figsize=(20,10)) -miny0_h.plot() -nomsum = ak.sum(samples['nominal'].weight) -mpt5sum = ak.sum(samples['Mpt5'].weight) -mpt5sum/nomsum - - -# In[36]: - - -vprojsig_h = ( - hist.Hist.new - .StrCategory(list(samples.keys()), name='samples') - .Reg(100,0, 20.0,label='vprojsig') - .Double() -) -for sname, sample in samples.items(): - vprojsig_h.fill(sname, samples[sname].unc_vtx_proj_sig) -vprojsig_h.plot() - - -# In[ ]: - - - - - -# In[33]: - - -nominal_mask = signalProcessor.minz0_sel(samples['nominal']) -Mpt5_mask = signalProcessor.minz0_sel(samples['Mpt5']) -print(len(samples['nominal'][nom_mask].unc_vtx_z)) -print(len(samples['Mpt5'][Mpt5_mask].unc_vtx_z)) - - -# In[181]: - +# Load nominal infile = '/sdf/group/hps/user-data/alspellm/2016/systematics/simp_target_unc/nov0proj/nominal.root' with uproot.open(infile) as f: nominal_h = f['expected_signal_ap_h'].to_hist() test_h = f['expected_signal_vd_h'].to_hist() +# Load -0.5 mm infile = '/sdf/group/hps/user-data/alspellm/2016/systematics/simp_target_unc/nov0proj/target_Mpt5.root' with uproot.open(infile) as f: Mpt5_h = f['expected_signal_ap_h'].to_hist() ratio_Mpt5_h = f['expected_signal_ap_h'].to_hist().reset() +# Load +0.5 mm infile = '/sdf/group/hps/user-data/alspellm/2016/systematics/simp_target_unc/nov0proj/target_Ppt5.root' with uproot.open(infile) as f: Ppt5_h = f['expected_signal_ap_h'].to_hist() ratio_Ppt5_h = f['expected_signal_ap_h'].to_hist().reset() -# In[182]: - - -nominal_h.plot() -plt.show() -test_h.plot() -plt.show() -Mpt5_h.plot() -plt.show() -Ppt5_h.plot() - - -# In[79]: - - -#take ratio of densities, misaligned to nominal -ratio_Mpt5 = Mpt5_h.values()/nominal_h.values() -mask = nominal_h.values() < 0.0 -ratio_Mpt5[mask] = 0 - -xbins = ratio_Mpt5_h.axes[0].centers -ybins = ratio_Mpt5_h.axes[1].centers -xgrid, ygrid = np.meshgrid(xbins, ybins, indexing='ij') -ratio_Mpt5_h.reset() -ratio_Mpt5_h.fill(xgrid.flatten(), ygrid.flatten(), weight=ratio_Mpt5.flatten()) -fig, ax = plt.subplots(figsize=(25,15)) -#ratio_Mpt5_h.plot(cmin=0.9, cmax=np.max(ratio_Mpt5.flatten())) -ratio_Mpt5_h.plot(cmin=0.8, cmax=1.2, cmap='RdYlBu') - - -# In[80]: #take ratio of densities, misaligned to nominal -ratio_Ppt5 = Ppt5_h.values()/nominal_h.values() -mask = nominal_h.values() < 0.0 
-ratio_Ppt5[mask] = 0 - -xbins = ratio_Ppt5_h.axes[0].centers -ybins = ratio_Ppt5_h.axes[1].centers -xgrid, ygrid = np.meshgrid(xbins, ybins, indexing='ij') -ratio_Ppt5_h.reset() -ratio_Ppt5_h.fill(xgrid.flatten(), ygrid.flatten(), weight=ratio_Ppt5.flatten()) -fig, ax = plt.subplots(figsize=(25,15)) -ratio_Ppt5_h.plot(cmin=0.8, cmax=1.2, cmap='seismic') - -# In[184]: +# Estimate systematic in a simple way by just taking the ratio of the expected signal rate between the two shifted target positions. +# This is probably too conservative, but what I did to finish my dissertation in time (Alic) - -##### take ratio of densities, misaligned to nominal ratio_PM = Ppt5_h.values()/Mpt5_h.values() -#mask = Ppt5_h.values() < 0.5 -#ratio_PM[mask] = 0 xbins = ratio_Ppt5_h.axes[0].centers ybins = ratio_Ppt5_h.axes[1].centers @@ -205,12 +61,14 @@ ratio_Ppt5_h.plot(cmin=0.90, cmax=1.1, cmap='seismic') plt.text(124, -5.4,'Expected Signal Ratio\n Between Off-Nominal Targets' , horizontalalignment='center') plt.ylim(-6.25, -4.7) -#plt.xlim(79.7,199.2) plt.xlim(50.0,210.0) plt.ylabel(r'$\log{\epsilon^2}$', fontsize=50) plt.xlabel('A\' Invariant Mass [MeV]') plt.savefig('signal_target_uncertainty_offnominal_ratio_2d.png') +# The systematic uncertainty is a function of both mass and epsilon. +# I decided to just take the worst case scenario for each MC mass across all relevent values of epsilon (where by relevent, I mean +# values of epsilon where we were able to put a 90% upper limit on the signal rate). masses = [] minvalues = [] for m, mass in enumerate(ratio_Ppt5_h.axes[0].centers): @@ -224,6 +82,7 @@ masses.append(mass) minvalues.append(minv) +# Fit the systematic uncertainty as a function of mass coefficients = np.polyfit(masses, minvalues, 4) print(coefficients) fitfunc = np.poly1d(coefficients) @@ -241,31 +100,3 @@ plt.legend() plt.savefig('signal_target_uncertainty_offnominal_v2.png') - -# In[ ]: - - - - - -# In[180]: - - -import numpy as np -import scipy.stats as stats - -n_sigma = 1.5 -percentage = stats.norm.cdf(n_sigma) - stats.norm.cdf(-n_sigma) -print(percentage) - -n_sigma = 1.5 - (1.5*0.087) -percentage_2 = stats.norm.cdf(n_sigma) - stats.norm.cdf(-n_sigma) -print(percentage_2) -print(1 -(percentage_2/percentage)) - - -# In[147]: - - - - diff --git a/plotUtils/simps/systematics/v0projsig_systematic.py b/plotUtils/simps/systematics/v0projsig_systematic.py index 1f6b1ed0b..0a6cc596e 100644 --- a/plotUtils/simps/systematics/v0projsig_systematic.py +++ b/plotUtils/simps/systematics/v0projsig_systematic.py @@ -1,4 +1,8 @@ #!/usr/bin/python3 +#======================================================================================================================================= +# Description: Calculates systematic uncertainty associated with the target projected vertex significance cut. +# The systematic is calculated by comparing the resulting cut efficiency in data and MC background. 
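# A toy illustration of the comparison described above (placeholder numbers, not the analysis samples or its
# final systematic definition): the cut efficiency below a v0 projection significance of 2.0 can be computed
# with hist's UHI slicing, h[:hist.loc(2.0):sum] / h[::sum], for data and for MC background, and then compared.
import hist
import numpy as np
toy = {s: hist.Hist.new.Reg(100, 0.0, 10.0, label='v0 projection significance').Double() for s in ('data', 'mc')}
toy['data'].fill(np.random.exponential(1.6, 100_000))
toy['mc'].fill(np.random.exponential(1.5, 100_000))
eff = {s: h[:hist.loc(2.0):sum] / h[::sum] for s, h in toy.items()}  # fraction of events passing the cut
print(f"data eff={eff['data']:.3f}, mc eff={eff['mc']:.3f}, rel. difference={abs(eff['data'] - eff['mc']) / eff['data']:.3f}")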
+ import os import awkward as ak import numpy as np @@ -14,20 +18,19 @@ import sys import math +# SIMP tools in hpstr hpstr_base = os.getenv('HPSTR_BASE') sys.path.append(f'{hpstr_base}/plotUtils/simps') import simp_signal_2016 -#format mpl plots -plt.rcParams.update({'font.size': 40, # Font size for text - 'axes.titlesize': 40, # Font size for titles - 'axes.labelsize': 40, # Font size for axis labels - 'xtick.labelsize': 40, # Font size for x-axis tick labels - 'ytick.labelsize': 40, # Font size for y-axis tick labels - 'lines.linewidth':3.0, - 'legend.fontsize': 40}) # Font size for legend +#====================================================================================================================================== +#INITIALIZATION +#======================================================================================================================================= +# Set plotting parameters for matplotlib +plt.rcParams.update({'font.size': 40, 'axes.titlesize': 40, 'axes.labelsize': 40, 'xtick.labelsize': 40, 'ytick.labelsize': 40, 'lines.linewidth': 3.0, 'legend.fontsize': 40}) plt.rcParams['font.family'] = 'DejaVu Sans' +#parse input arguments import argparse parser = argparse.ArgumentParser(description='') parser.add_argument('--outdir', type=str, default='./search_results') @@ -35,23 +38,26 @@ args = parser.parse_args() outdir = args.outdir -####################################################################################################################################### -#Load signal processor +#======================================================================================================================================= +# LOAD DATA AND MC BACKGROUND +#======================================================================================================================================= + +# Initialize the signal processor search_window = 1.5 #used in final search signalProcessor = simp_signal_2016.SignalProcessor(args.mpifpi, search_window) -#Read in data and MC bkg samples = {} branches = ["unc_vtx_proj_sig","unc_vtx_ele_track_z0","unc_vtx_pos_track_z0"] -#Read 10% Data +# Load 10% data infile = '/sdf/group/hps/user-data/alspellm/2016/data/hadd_BLPass4c_1959files.root' selection = 'vtxana_Tight_L1L1_nvtx1' +# Select Psum signal region samples['data'] = signalProcessor.load_data(infile,selection, expressions=branches, cut_expression='((unc_vtx_psum > 1.0) & (unc_vtx_psum < 1.9) )') samples['data']['weight'] = 1.0 #Assign weight of 10 to scale up to full lumi -#Load MC background +# Load MC background lumi = 10.7*.1 #pb-1 mc_scale = {'tritrig' : 1.416e9*lumi/(50000*10000), 'wab' : 0.1985e12*lumi/(100000*10000)} @@ -68,7 +74,7 @@ #Combine tritrig and wab samples['tritrig+wab+beam'] = ak.concatenate([samples['tritrig'], samples['wab']]) -#init histogram of v0 projection significance values to compare data and MC background +# Initialize histograms to calculate cut efficiencies v0projsig_h = ( hist.Hist.new .StrCategory(list(samples.keys()), name='samples') @@ -76,11 +82,11 @@ .Double() ) -#Fill without weights, so that histos can be converted to ROOT and retain statistical uncertainty +# Fill without weights, so that histos can be converted to ROOT and retain statistical uncertainty for sname, sample in samples.items(): v0projsig_h.fill(sname, sample.unc_vtx_proj_sig, weight=sample.weight/ak.sum(sample.weight)) -#Events that pass v0projsig in data vs MC bkg +# Calculate efficiencies eff_mc = 
round(v0projsig_h['tritrig+wab+beam',:][:hist.loc(2.0):sum]/v0projsig_h['tritrig+wab+beam',:][::sum],2) eff_data = round(v0projsig_h['data',:][:hist.loc(2.0):sum]/v0projsig_h['data',:][::sum],2) @@ -93,3 +99,4 @@ plt.ylabel('Normalized Events') plt.yscale('log') plt.savefig(f'{outdir}/v0projsig_systematic_lowpsum.png') + From 32d7ef147cac906dce301967d34063a469b21346 Mon Sep 17 00:00:00 2001 From: Alic Shen Spellman Date: Tue, 10 Sep 2024 07:55:46 -0700 Subject: [PATCH 26/27] rename file --- plotUtils/simps/{cmax.py => gen_oim_lookuptable.py} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename plotUtils/simps/{cmax.py => gen_oim_lookuptable.py} (100%) diff --git a/plotUtils/simps/cmax.py b/plotUtils/simps/gen_oim_lookuptable.py similarity index 100% rename from plotUtils/simps/cmax.py rename to plotUtils/simps/gen_oim_lookuptable.py From f573bb18c31007dff416faf4bae8b81318d28b79 Mon Sep 17 00:00:00 2001 From: Alic Shen Spellman Date: Tue, 10 Sep 2024 14:40:21 -0700 Subject: [PATCH 27/27] adding some ana scripts --- plotUtils/simps/fit_beamspot/fit_beamspot.py | 349 ++++++++++ .../simps/mass_resolution/.moller_ana.py.swp | Bin 0 -> 16384 bytes .../mass_resolution/fee_smearing_nhits.py | 189 ++++++ .../mass_resolution/fit_mass_resolution.py | 342 ++++++++++ plotUtils/simps/mass_resolution/moller_ana.py | 364 +++++++++++ plotUtils/simps/simp_plot_utils.py | 597 ++++++++++++++++++ 6 files changed, 1841 insertions(+) create mode 100644 plotUtils/simps/fit_beamspot/fit_beamspot.py create mode 100644 plotUtils/simps/mass_resolution/.moller_ana.py.swp create mode 100644 plotUtils/simps/mass_resolution/fee_smearing_nhits.py create mode 100644 plotUtils/simps/mass_resolution/fit_mass_resolution.py create mode 100644 plotUtils/simps/mass_resolution/moller_ana.py create mode 100644 plotUtils/simps/simp_plot_utils.py diff --git a/plotUtils/simps/fit_beamspot/fit_beamspot.py b/plotUtils/simps/fit_beamspot/fit_beamspot.py index 000000000..19b2e9f98 new file mode 100644 index 000000000..19b2e9f98 --- /dev/null +++ b/plotUtils/simps/fit_beamspot/fit_beamspot.py @@ -0,0 +1,349 @@ +#!/usr/bin/python3 +""" +This script is used to characterize the beamspot in data and MC. The beamspot is fitted with a rotated 2D gaussian, and the +fit results are saved to a json file that is loaded by the hpstr vertex analysis processor. +The beamspot is estimated by projecting unconstrained v0 vertices back to the target location. 
+""" +import os +import awkward as ak +import numpy as np +import hist +from hist import Hist +import uproot +import math +import ROOT as r +import glob as glob +import re +import json +import matplotlib.pyplot as plt +import matplotlib as mpl +import mplhep +get_ipython().run_line_magic('matplotlib', 'inline') +mpl.style.use(mplhep.style.ROOT) + +# SIMP tools defined in hpstr +import sys +hpstr_base = os.getenv('HPSTR_BASE') +sys.path.append(f'{hpstr_base}/plotUtils/simps') +import simp_plot_utils as utils + +#======================================================================================================================================= +# INITIALIZE +#======================================================================================================================================= +# --outdir: Specify output directory + +import argparse +parser = argparse.ArgumentParser(description='Process some inputs.') +parser.add_argument('--outdir', type=str, default='fit_results') +args = parser.parse_args() +outfilename = args.outdir + +#======================================================================================================================================= +# Functions +#======================================================================================================================================= +def show(*, filepath = None, text_ax = None): + """my own plt.show() equivalent that adds the HPS labels + and could save to a file if a filepath is given. + + Parameters + ---------- + filepath: str, pathlib.Path + path to where the plot should be written to, extension defines type + If None (default), don't save the figure to a file. + text_ax: mpl.axes.Axes + axes to put HPS labels on, helpful if there are two axes + (e.g. in a ratio plot). If None (default), the labels are + just put on the current axes as defined by MPL (usually last + axes drawn on). + """ + #mplhep.label.exp_text('HPS','Internal', ax = text_ax) + #mplhep.label.lumitext('10% Data', ax = text_ax) + if filepath is not None: + # use bbox_inches='tight' to resize canvas to tightly fit all + # of the drawn stuff + plt.savefig(filepath, bbox_inches='tight') + plt.show() + +def load_data(filepath, selection, cut_expression = None, expressions=None): + with uproot.open(filepath) as f: + events = f[f'{selection}/{selection}_tree'].arrays( + cut=cut_expression, + expressions = expressions + ) + return events + +# Set global font sizes +plt.rcParams.update({'font.size': 50, # Font size for text + 'axes.titlesize': 50, # Font size for titles + 'axes.labelsize': 50, # Font size for axis labels + 'xtick.labelsize': 50, # Font size for x-axis tick labels + 'ytick.labelsize': 50, # Font size for y-axis tick labels + 'lines.linewidth':4.0, + 'legend.fontsize': 40}) # Font size for legend + +def rotate_coordinates(x, y, angle): + """ + The beamspot fit is performed in the rotated coordinate system according to the fitted rotation angle. + """ + x_rotated = x*math.cos(angle) - y*math.sin(angle) + y_rotated = x*math.sin(angle) + y*math.cos(angle) + return (x_rotated, y_rotated) + +def gauss2DFit_Rotated(x, par): + """ + Defines the rotated 2D gaussian fit function used to fit the beamspot. 
+ """ + amplitude = par[0] + meanx = par[1] + meany = par[2] + sigmax = par[3] + sigmay = par[4] + angle = par[5] + x_rotated = x[0] * math.cos(angle) - x[1] * math.sin(angle) + y_rotated = x[0] * math.sin(angle) + x[1] * math.cos(angle) + + exponent = -0.5 * ((x_rotated - meanx)**2 / sigmax**2 + (y_rotated - meany)**2 / sigmay**2) + return amplitude * math.exp(exponent) + +def gaus1DFit(histo, xmin, xmax): + """ + 1D Gaussian fit used to seed the 2D fit. + """ + fit = histo.Fit("gaus", "QRS", "", xmin, xmax) + try: + mean = fit.Parameter(1) + except: + return None, None + sig = fit.Parameter(2) + return mean, sig + +def projectVertex(target_pos, vz, pz, px, py, vx, vy): + """ + Projects vertex back to target location using vertex momentum. + """ + projx = vx - ((vz-target_pos)*(px/pz)) + projy = vy - ((vz-target_pos)*(py/pz)) + + return projx, projy + +def runVertex2DFit(vtx_proj_hh, run_fit_params, run, outdir, outfile, nsigma=1.5): + """ + Runs the target projected vertex beamspot fitting procedure. + Saves the fit results to a json file. + + Args: + vtx_proj_hh (TH2F): 2D histogram of target projected veretx x and y positions. + run_fit_params (list(float)): List to store fit parameter results. + run (int): Run number. + outdir (str): Output directory. + outfile (str): Output ROOT file. + nsigma (float, optional): Width of rotated 2D gaussian fit in x and y. + """ + + # Get the not-rotated x and y projections to seed the fits + projy = vtx_proj_hh.ProjectionY('projy',0, -1, "") + mean_y, sig_y = gaus1DFit(projy, -0.5, 0.5) + projx = vtx_proj_hh.ProjectionX('projx',0, -1, "") + mean_x, sig_x = gaus1DFit(projx, -2.0, 2.0) + projy.Write() + projx.Write() + del projy + del projx + + if mean_y is None or mean_x is None: + return + + # Make dir for run being fit + outfile.cd() + outfile.mkdir('fit_results_run_%s'%(run)) + rundir = outfile.GetDirectory('fit_results_run_%s'%(run)) + rundir.cd() + + # Perform rotated 2d gaussian fit + fitFunc = r.TF2("gaussian", gauss2DFit_Rotated, -5.0,5.0,-1.0, 1.0, 6) + fitFunc.SetRange(mean_x - (nsigma*sig_x), mean_y - (nsigma*sig_y), mean_x+(nsigma*sig_x), mean_y+(nsigma*sig_y)) + fitFunc.SetParameters(1.0, mean_x, mean_y, sig_x, sig_y, 1.0) + vtx_proj_hh.Fit(fitFunc, "RS") + params = fitFunc.GetParameters() + xpos = params[1] + ypos = params[2] + xsigma = params[3] + ysigma = params[4] + angle = params[5] + xrot, yrot = rotate_coordinates(xpos, ypos, -angle) + + canvas = r.TCanvas('target_proj_vtx_fits_run_%s'%(run), "Run %s Target Vertex Projection Fit"%(run),2000, 1500) + vtx_proj_hh.GetXaxis().SetTitleSize(0.05) + vtx_proj_hh.GetYaxis().SetTitleSize(0.05) + + canvas.SetLeftMargin(0.15) + canvas.SetTopMargin(0.1) + canvas.SetBottomMargin(0.15) + + vtx_proj_hh.GetXaxis().SetRangeUser(-.8, .8) + vtx_proj_hh.GetYaxis().SetRangeUser(-.3, .3) + vtx_proj_hh.SetName('target_proj_vtx_fits_run_%s'%(run)) + vtx_proj_hh.Draw("COLZ") + fitFunc.Draw("SAME") + vtx_proj_hh.Write() + canvas.Write() + canvas.SaveAs(f'{outdir}/{canvas.GetName()}.png') + canvas.Close() + + del fitFunc + run_fit_params[run] = [xpos, ypos, xsigma, ysigma, angle, mean_x, mean_y, sig_x, sig_y] + +def writeFitResultsJson(fit_results, output_json_file): + #Save fit results to json file + json_data = {} + for key, values in fit_results.items(): + entry = {'target_position':-4.3, 'rotated_mean_x': values[0], 'rotated_mean_y':values[1], 'rotated_sigma_x':values[2], 'rotated_sigma_y':values[3], 'rotation_angle_mrad': 1000.*values[4], 'unrotated_mean_x':values[5], 'unrotated_mean_y':values[6], 
'unrotated_sigma_x':values[7],'unrotated_sigma_y':values[8]} + json_data[key] = entry + with open(output_json_file, "w") as json_file: + json.dump(json_data, json_file, indent=4) + +def run_fit_beamspot(infile, selection, outdir, outfilename, target_pos=-4.3): + """ + Run the target vertex projection and beamspot fitting procedure. + + Args: + infile (str): Input ROOT file to load data. + selection (str): Selection defined in the input file. + outdir (str): Output directory. + outfilename (str): Output ROOT file name. + target_pos (float): Target location in z [mm]. Vertices are projected back to this location. + """ + if not os.path.exists(outdir): + os.makedirs(outdir) + + #load the necessary data + branches = ['unc_vtx_psum', 'unc_vtx_x', 'unc_vtx_y', 'unc_vtx_z', 'unc_vtx_px', 'unc_vtx_py', 'unc_vtx_pz', 'run_number'] + data = load_data(infile, selection, expressions=branches) + + #outfile = uproot.recreate(f'{outdir}/{outfilename}.root') + outfile = r.TFile(f'{outdir}/{outfilename}.root',"RECREATE") + outfile_json = f'{outdir}/{outfilename}.json' + + #get unique list of runs in data + run_list = np.unique(data.run_number) + + fit_results={} + for run in run_list: + run_data = data[data.run_number == run] + run = int(run) + + #Build histogram of vertex position projected to target location + proj_x_y_h = ( + hist.Hist.new + .Reg(100, -3.0, 3.0,label='Target Projected Vertex X [mm]') + .Reg(500, -1.5, 1.5,label='Target Projected Vertex Y [mm]') + .Double() + ) + + projected_x, projected_y = projectVertex(target_pos, run_data.unc_vtx_z, run_data.unc_vtx_pz, run_data.unc_vtx_px, + run_data.unc_vtx_py, run_data.unc_vtx_x, run_data.unc_vtx_y) + proj_x_y_h.fill(np.array(projected_x), np.array(projected_y)) + + root_proj_x_y_h = utils.cnvHistogramToROOT(proj_x_y_h) + outfile.cd() + c = utils.drawTH2(root_proj_x_y_h, f'target_projected_vertex_x_y_run_{run}', logZ=True) + c.Write() + c.SaveAs(f'{outdir}/{c.GetName()}.png') + + runVertex2DFit(root_proj_x_y_h, fit_results, run, outdir, outfile, nsigma=1.5) + + #Fit the target projected vertex x/y distribution with rotated 2d gaussian. 
+    #fit_results = {} +    #runVertex2DFit(root_proj_x_y_h, fit_results, run, outdir, outfile, nsigma=1.5) +    writeFitResultsJson(fit_results, outfile_json) + +    outfile.Close() + +#======================================================================================================================================= +# FIT BEAMSPOTS IN DATA AND MC +#======================================================================================================================================= + +# MC Tritrig beamspot +# These events are projected to the target and used to characterize the beamspot +infile = '/sdf/group/hps/user-data/alspellm/2016/tritrig_mc/pass4b/tritrig-beam-hadd-10kfiles-ana-smeared-corr.root' +selection = 'vtxana_Tight_nocuts' +outdir = args.outdir # output directory from the --outdir option parsed above +outfilename = f'tritrig_mc_beamspot_fit_{selection}' +# Fit the beamspot +run_fit_beamspot(infile, selection, outdir, outfilename) + +# Data beamspot +# These events are projected to the target and used to characterize the beamspot +infile = '/sdf/group/hps/user-data/alspellm/2016/data/hadd_BLPass4c_1959files.root' +selection = 'vtxana_Tight_nocuts' +outdir = 'data_beamspot_fit' +outfilename = f'data_beamspot_fit_{selection}' +# Fit the beamspot +run_fit_beamspot(infile, selection, outdir, outfilename) + +#======================================================================================================================================= +# PLOT RESULTS +#======================================================================================================================================= + +# Load data fits +data_json = f'{outdir}/data_beamspot_fit_vtxana_Tight_nocuts.json' + +runs = [] +xpositions = [] +ypositions=[] +xwidths=[] +ywidths=[] +angles=[] + +with open(data_json, 'r') as file: + data = json.load(file) + for run, values in data.items(): + runs.append(run) + xpositions.append(values['unrotated_mean_x']) + ypositions.append(values['unrotated_mean_y']) + xwidths.append(values['unrotated_sigma_x']) + ywidths.append(values['unrotated_sigma_y']) + angles.append(values['rotation_angle_mrad']) +runs = [int(x) for x in runs] + + +# Load MC fits +mc_json = f'{outdir}/tritrig_mc_beamspot_fit_vtxana_Tight_nocuts.json' +with open(mc_json, 'r') as file: + values = list(json.load(file).values())[0] # use the first run entry in the MC fit results + mc_unrotated_mean_x = values['unrotated_mean_x'] + mc_unrotated_mean_y = values['unrotated_mean_y'] + mc_unrotated_sigma_x = values['unrotated_sigma_x'] + mc_unrotated_sigma_y = values['unrotated_sigma_y'] + mc_rotation_angle_mrad = values['rotation_angle_mrad'] + +# Plot data and MC beamspot x and y positions +fig, ax = plt.subplots(figsize=(30,20)) +plt.scatter(runs, xpositions, marker='o', s=150,label='Beamspot data', color='darkblue') +plt.scatter(runs, ypositions, marker='o', s=150,label='Beamspot data', color='teal') +plt.axhline(mc_unrotated_mean_x, linestyle='--', linewidth=5.0, color='darkblue', label ='Beamspot MC') +plt.axhline(mc_unrotated_mean_y, linestyle='--', linewidth=5.0, color='teal', label ='Beamspot MC') +plt.xlabel('Run Number') +plt.ylabel('Beamspot Position [mm]') +plt.legend(fontsize=40) +plt.savefig(f'{outdir}/data_fitted_beamspot_positions_unrotated.pdf') + +# Plot data and MC beamspot x and y widths +fig, ax = plt.subplots(figsize=(30,20)) +plt.scatter(runs, xwidths, marker='^', s=150,label='Beamspot $\sigma_{x}$ data', color='darkblue') +plt.scatter(runs, ywidths, marker='^', s=150,label='Beamspot $\sigma_{y}$ data', color='teal') +plt.axhline(mc_unrotated_sigma_x, linestyle='--', linewidth=5.0, color='darkblue', label ='Beamspot $\sigma_{x}$ MC') 
+plt.axhline(mc_unrotated_sigma_y, linestyle='--', linewidth=5.0, color='teal', label ='Beamspot $\sigma_{x}$ MC') +plt.xlabel('Run Number') +plt.ylabel('Beamspot Width [um]') +plt.legend(fontsize=40) +plt.savefig(f'{outdir}/data_fitted_beamspot_widths_unrotated.pdf') + +# Plot data and MC beamspot rotation angles +fig, ax = plt.subplots(figsize=(30,20)) +plt.scatter(runs, angles, marker='*', s=250,label='Beamspot Rotation Angle Data', color='darkred') +plt.axhline(mc_rotation_angle_mrad, linestyle='--', linewidth=5.0, color='darkblue', label ='Beamspot Rotation Angle MC') +plt.xlabel('Run Number') +plt.ylabel('Beam Rotation Angle [mrad]') +plt.legend(fontsize=40) +plt.savefig(f'{outdir}/data_fitted_beamspot_rotation_angles.pdf') + diff --git a/plotUtils/simps/mass_resolution/.moller_ana.py.swp b/plotUtils/simps/mass_resolution/.moller_ana.py.swp new file mode 100644 index 0000000000000000000000000000000000000000..5b60fb8b7e26cf4c89ab1e763ded5e87bc77e6ab GIT binary patch literal 16384 zcmeHNU5F*c6)uh0M2#jGFb^UecZl~+ywmq*c6P_nQP%x&2QoX0Gb=>Vrlz}kZZmbe z+pg-Fd);0CUKNCxhk$~DKj5Q6UVP9e@xd1ngk&Eiki6s}F9{?F{^YCvxp(f}&FrkO zFi16gbF0s(K6UD=I;X08YQ6R7(lX6EEr#nCj9qyDufYNKyU#!Q;t1nXB|^%cQSEjq zKL~lmTdld>YBBaqWw5DIuh~tMPTj4;ola}MnaEh5)uB{P74{O<>~W>sRH#^HI*g-c zFP2iIE?RMt!S1bCt!2P6a0vsM4rb%Jog84#qI=huU0f}HubcS&uDp(y^0f*5x267MDgWby{+m+&U@8A{ zLjQHCUn%8xmJU>Y{Ai-SoY0qQ`(qif3|Iy%1C{~HfMvikU>UFsSOzQumI2GaWf{_1Re#x!pD=>fct=p;PEryHQ*J%@VW&|TvT33tXwaS z{Lq|oaND#rvhQ|@iL z!sF8I(*rcqZnviCZn{#<`)TIhx#{Wg2I_lh;KKpsBPYQ4AV%5~sLCgM2+m#Nv7!%VL6=mak}SBJKQtYX=V= z%za$z3Q~B=Gx-r+ASJ_%u9gGx!ypi;h%~JWy)8sU%g2aEKKbaSGkkbCKR8Bt7YP1k zUdSb-VgvRR(u5#i1Ysn63M0~Jtnq$B8pJP-r-cwJ6LQ-|!Hvnf!UO zG+EHrC|O=CNeb2Sv7$V^z;d4mS1NRtMkKc2y&6i#<5ct$9Dh@}m&p60yJ3A}$;tpZH&gLVf@7sH`St%7*eJd_s7Mi=;Ybc$JUlsPOt7C zr&ye6KTN?OPdAiP+%Z#zDB-CRsgwV>MpmC@rHuQ&kz_qgv3hN0rg*T{pgd}E@Dc$m z0-i~|SS!y~=0`4rP^(&HrD!#`=o~GLoyps*`$BnXn3$+ster_?4|A1N+!F|+B-6@) z5gg=p~8jY~)oYn;gvWK2oTuPepw;W~#cW_4jPDzgLsIZ#r}t zCodr;_i~l#?Y$n4*5JW(LWbv~XGCwCHQRIh2uP zjz+uYq_m4e(GshdqP)P=Kp_k0uEP@~Y`>0tU!1O9=Sp0dJY0>?9BC2bv2r$q7SUF{ zcIwRP+9U2mM^{gb;uppdxvNXdXGmkUS34KX+|JOy9@oY@8Ls1ywka78K|$sJNN*Iq zL{^GNR#w)GD6Isj*{#aOQpAEZ?ol^Xno2fKpIvNY*jhC5y~XI>VMvt};C(9Jndq4C9_3{c zBb;v|%6*4Ps#x!qT72YZt++TtmW+H1<~`sj5U zuIDG@RpFq?-==Dh@@1UU!#u+-f=78@r|xQIDs8TZQ8O9nZXC@r^Zx$<-t8{}=Ka5V zRW xmax: + xmaxx = xmax + fitfunc.SetParameter(0, norm) + fitfunc.SetParameter(1, mu) + fitfunc.SetParameter(2, sigma) + fitRes = histo.Fit(fitfunc,"QLES","", xminx, xmaxx) + #If fit fails, skip + try: + if fitRes.Parameters()[1] < xmin or fitRes.Parameters()[1] > xmax or fitRes.Ndf() < 1: + continue + except: + continue + + params = fitRes.Parameters() #these results seed the next iteration...maybe should only do if improved? 
+ chi2 = fitRes.Chi2() + ndf = fitRes.Ndf() + #replace best fit + if chi2/ndf < best_chi2: + best_params = params + + #Do the final fit using the best parameters found + fitfunc.SetParameter(0, best_params[0]) + fitfunc.SetParameter(1, best_params[1]) + fitfunc.SetParameter(2, best_params[2]) + xminx = best_params[1] - nsigma*best_params[2] + xmaxx = best_params[1] + nsigma*best_params[2] + + #again, if data, use asymmetric fit window to avoid the left shoulder + if isData: + if xminx < xmin: + xminx = xmin + if xmaxx > xmax: + xmaxx = xmax + + fitRes = histo.Fit(fitfunc,"QLES","", xminx, xmaxx) + params = fitRes.Parameters() + errors = fitRes.Errors() + chi2 = fitRes.Chi2() + ndf = fitRes.Ndf() + return histo, params, errors, chi2/ndf + +#======================================================================================================================================= +# LOAD DATA AND FIT FEE PEAK +#======================================================================================================================================= + +# Load FEEs tracks in data +data_results = {} +infilename = '/sdf/group/hps/user-data/alspellm/2016/fee_smearing/run7800/hadd/hadd_fee_2pt3_recon_fee_histos.root' #FEE skimmed tracks from hpstr track analysis processor + +# Read track hit histograms +histoname = 'KalmanFullTracks/KalmanFullTracks_p_vs_nHits_top_hh' #top +infile = r.TFile(f'{infilename}',"READ") +top_h = copy.deepcopy(infile.Get(f'{histoname}')) +histoname = 'KalmanFullTracks/KalmanFullTracks_p_vs_nHits_bot_hh' #bot +bot_h = copy.deepcopy(infile.Get(f'{histoname}')) +infile.Close() + +# Change the names to use as keys +top_h.SetName('top') +bot_h.SetName('bot') + +# Fit the FEE peak for each category of nhits. Just have access to 10, 11, 12 for now +for h in [top_h, bot_h]: + histo = h + for nhits in [10, 11, 12]: + # Get the nhits momentum projection + proj = histo.ProjectionY(f'proj_{h.GetName()}_{nhits}hits', histo.GetXaxis().FindBin(nhits), histo.GetXaxis().FindBin(nhits),"") + # Fit the data + _, params, errors, chi2ndf = gaus_fit(proj, 2.0, 2.5, 2.4, 0.47, 12000, nsigma=1.5, isData=True) + + # Store the results [mu,sigma] for top/bot nhits= + data_results[f'{h.GetName()}_nhits_{nhits}'] = [params[1], params[2]] + +#======================================================================================================================================= +# LOAD MC AND FIT FEE PEAK +#======================================================================================================================================= + +# Load MC FEE's from hpstr track analysis processor +mc_results = {} +infilename= '/sdf/group/hps/user-data/alspellm/2016/fee_smearing/tritrig/hadd/hadd_fee_2pt3_recon_tritrig_histos.root' + +# Read track hit histograms +histoname = 'KalmanFullTracks/KalmanFullTracks_p_vs_nHits_top_hh' #top +infile = r.TFile(f'{infilename}',"READ") +top_h = copy.deepcopy(infile.Get(f'{histoname}')) +histoname = 'KalmanFullTracks/KalmanFullTracks_p_vs_nHits_bot_hh' #bot +bot_h = copy.deepcopy(infile.Get(f'{histoname}')) +infile.Close() + +#Change the names to use as keys +top_h.SetName('top') +bot_h.SetName('bot') + +for h in [top_h, bot_h]: + histo = h + for nhits in [10, 11, 12]: + # Get the nhits momentum projection + proj = histo.ProjectionY(f'proj_{h.GetName()}_{nhits}hits', histo.GetXaxis().FindBin(nhits), histo.GetXaxis().FindBin(nhits),"") + # Fit the data + _, params, errors, chi2ndf = gaus_fit(proj, 2.1, 2.5, 2.2, 0.1, proj.GetMaximum(), nsigma=1.5) + # Store the results [mu, 
sigma] for top/bot nhits= + mc_results[f'{h.GetName()}_nhits_{nhits}'] = [params[1], params[2]] + +#======================================================================================================================================= +# CALCULATE MOMENTUM SMEARING FACTORS +#======================================================================================================================================= +# Store momentum smearing factors in ROOT file +outfile = r.TFile(f'{outdir}/smearingFile_2016_nhits.root',"RECREATE") +outfile.cd() +# Calculate smearing for Top and Bot +smtop_h = r.TH1F('KalmanFullTracks_p_vs_nHits_hh_smearing_rel_top','p_vs_nHits_smearing_rel_top;nhits;smear factor', 3, 9.5, 12.5) +smbot_h = r.TH1F('KalmanFullTracks_p_vs_nHits_hh_smearing_rel_bot','p_vs_nHits_smearing_rel_bot;nhits;smear factor', 3, 9.5, 12.5) + +# Calculate smearing factor according to 2016 Bump Hunt +smear_fac = lambda mu_data, sig_data, mu_mc, sig_mc : np.sqrt(np.square(sig_data/mu_data) - np.square(sig_mc/mu_mc)) +for key, vals in data_results.items(): + istop = False + if 'top' in key: + istop = True + nhits = float(key.split('_')[2]) + mu_data = vals[0] + sig_data = vals[1] + mu_mc = mc_results[key][0] + sig_mc = mc_results[key][1] + sf = smear_fac(mu_data, sig_data, mu_mc, sig_mc) + print(f'{key} sf={sf}') + + #save results + if istop: + smtop_h.SetBinContent(smtop_h.GetXaxis().FindBin(nhits), sf) + else: + smbot_h.SetBinContent(smbot_h.GetXaxis().FindBin(nhits), sf) + +outfile.Write() diff --git a/plotUtils/simps/mass_resolution/fit_mass_resolution.py b/plotUtils/simps/mass_resolution/fit_mass_resolution.py new file mode 100644 index 000000000..44fc06f75 --- /dev/null +++ b/plotUtils/simps/mass_resolution/fit_mass_resolution.py @@ -0,0 +1,342 @@ +#!/usr/bin/python3 +""" +This script fits the MC signal mass resolution as a function of invariant mass. +The mass resolution uses SIMP signal reconstructed vertex invariant mass with radMatchTight selection (truth matched ele). +Fill a histogram with invariant mass and fit the distribution with a Gaussian fit function. +Fit the mass resolution as a function of invariant mass with a polynomial. Use p-test to find best fit order. +""" +import os +import awkward as ak +import numpy as np +import hist +from hist import Hist +import uproot +import ROOT as r +import matplotlib.pyplot as plt +import matplotlib as mpl + +# SIMP tools defined in hpstr +import sys +hpstr_base = os.getenv('HPSTR_BASE') +sys.path.append(f'{hpstr_base}/plotUtils/simps') +import simp_plot_utils as utils + +plt.rcParams.update({'font.size': 40, # Font size for text + 'axes.titlesize': 40, # Font size for titles + 'axes.labelsize': 40, # Font size for axis labels + 'xtick.labelsize': 40, # Font size for x-axis tick labels + 'ytick.labelsize': 40, # Font size for y-axis tick labels + 'lines.linewidth':4.0, + 'legend.fontsize': 40}) # Font size for legend + +#======================================================================================================================================= +# INITIALIZE +#======================================================================================================================================= +# --outdir: Specify output directory. 
+ +import argparse +parser = argparse.ArgumentParser(description='Process some inputs.') +parser.add_argument('--outdir', type=str, default='moller_mass_fits') +parser.add_argument('--outfilename', type=str, default='mass_resolution') +args = parser.parse_args() +outdir = args.outdir +outfilename = args.outfilename + +#======================================================================================================================================= +# FUNCTIONS +#======================================================================================================================================= + +def load_signal(filepath, selection, cuts=None, expressions=None): + #load signal with the hpstr ana processor flat tuple struct + with uproot.open(filepath) as f: + events = f[f'{selection}/{selection}_tree'].arrays( + expressions = expressions, cut = cuts + ) + return events + +def gaus_fit(histo, xmin, xmax, smean, swidth, snorm, nsigma=2.0, isData=False): + + print('seeds:', xmin, xmax, smean, swidth, snorm) + #initial fit with seeds + fitfunc = r.TF1("gaus","gaus") + fitfunc.SetParameter(0, snorm) + fitfunc.SetParameter(1, smean) + fitfunc.SetParameter(2, swidth) + fitRes = histo.Fit(fitfunc,"QLES","", xmin, xmax) + try: + params = fitRes.Parameters() + chi2 = fitRes.Chi2() + ndf = fitRes.Ndf() + + best_chi2 = chi2/ndf + best_params = params + except: + best_chi2 = 999999.9 + params = [snorm, smean, swidth] + best_params=params + + niters = 100 + for n in range(niters): + norm = params[0]#*np.random.uniform(80,120)*0.01 + mu = params[1]#*np.random.uniform(80,120)*0.01 + sigma = params[2]#*np.random.uniform(80,120)*0.01 + + xminx = mu - nsigma*sigma + xmaxx = mu + nsigma*sigma + if isData: + if xminx < xmin: + xminx = xmin + if xmaxx > xmax: + xmaxx = xmax + fitfunc.SetParameter(0, norm) + fitfunc.SetParameter(1, mu) + fitfunc.SetParameter(2, sigma) + fitRes = histo.Fit(fitfunc,"QLES","", xminx, xmaxx) + try: + if fitRes.Parameters()[1] < xminx or fitRes.Parameters()[1] > xmaxx or fitRes.Ndf() < 1: + continue + except: + continue + + params = fitRes.Parameters() + #print(params) + chi2 = fitRes.Chi2() + ndf = fitRes.Ndf() + if chi2/ndf < best_chi2: + best_params = params + + fitfunc.SetParameter(0, best_params[0]) + fitfunc.SetParameter(1, best_params[1]) + fitfunc.SetParameter(2, best_params[2]) + xminx = best_params[1] - nsigma*best_params[2] + xmaxx = best_params[1] + nsigma*best_params[2] + + if isData: + if xminx < xmin: + xminx = xmin + if xmaxx > xmax: + xmaxx = xmax + + print('result: ', xminx, xmaxx, best_params[1], best_params[2], best_params[0]) + fitRes = histo.Fit(fitfunc,"QLES","", xminx, xmaxx) + params = fitRes.Parameters() + errors = fitRes.Errors() + chi2 = fitRes.Chi2() + ndf = fitRes.Ndf() + if ndf > 0: + chi2 = chi2/ndf + else: + chi2 = 99999.9 + return histo, params, errors, chi2 + +def fit_with_poly(tgrapherrs): + polys = [] + chi2s = [] + fstats = [] + fitResults = [] + npoints = len(tgrapherrs.GetX()) + + for n in range(10): + fitfunc = r.TF1(f'pol{n}',f'pol{n}', tgrapherrs.GetX()[0], tgrapherrs.GetX()[-1]) + fitfunc.SetRange(30.0, 124) + fitfunc.SetLineColor(r.kRed) + fitfunc.SetMarkerSize(0.0) + fitRes = tgrapherrs.Fit(fitfunc,"SRQ") + chi2s.append(fitRes.Chi2()) + polys.append(n) + fitResults.append(fitRes) + + #Perform fstat test to see how much fit improves with additional order (why does this work?) 
+ if n > 0: + fstats.append( (chi2s[n-1]-chi2s[n])*(npoints-n-1)/(chi2s[n])) + else: + fstats.append(0.0) + + #Pick the order that shows greatest positive improvement in fstat + best_diff = 0.0 + best_n = None + for n,fstat in enumerate(fstats): + if n == 0: + continue + diff = fstats[n-1] - fstat + if diff > 0 and diff > best_diff: + best_diff = diff + best_n = n + + print(f'best n: {best_n}') + return fitResults[best_n] + +#======================================================================================================================================= +# LOAD MC SIGNAL +#======================================================================================================================================= + +# Load MC signal with momentum smearing +indir = '/sdf/group/hps/user-data/alspellm/2016/simp_mc/pass4b/beam/smeared' +infilename = lambda mass: f'mass_{mass}_hadd-simp-beam_ana_smeared_corr.root' +# Masses of each MC generated signal mass +masses = [x for x in range(30,126,2)] + +# Specify any cuts. Here we put a cut on the number of hits on track based on the 2016 SIMP L1L1 analysis +inv_masses_h={} +cuts = '( (unc_vtx_psum > 1.0) & (unc_vtx_ele_track_nhits >= 7) & (unc_vtx_pos_track_nhits >= 7) )' +# Load each MC signal mass +for mass in masses: + infile = os.path.join(indir, infilename(mass)) + selection = 'vtxana_radMatchTight_2016_simp_SR_analysis' # Use radMatchTight + branches = ['unc_vtx_mass', 'unc_vtx_ele_track_nhits', 'unc_vtx_pos_track_nhits'] + # Load events from hpstr vertex analysis processor + tree = load_signal(infile,selection, expressions=branches, cuts=cuts) + + # Initialize histogram + inv_masses_h[f'mass_{mass}'] = ( + hist.Hist.new + .Reg(800,17.5,217.5,label='Invariant Mass [MeV]') + .Double() + ) + + # Fill histogram + inv_masses_h[f'mass_{mass}'].fill(tree.unc_vtx_mass*1000.) + +# Load MC signal WITHOUT momentum smearing +indir = '/sdf/group/hps/user-data/alspellm/2016/simp_mc/pass4b/beam/nosmearing' +infilename = lambda mass: f'mass_{mass}_hadd-simp-beam-ana-nosmearing.root' +inv_masses_unsm_h={} +for mass in masses: + infile = os.path.join(indir, infilename(mass)) + selection = 'vtxana_radMatchTight_2016_simp_SR_analysis' + branches = ['unc_vtx_mass' ] + tree = load_signal(infile,selection, expressions=branches, cuts=cuts) + + inv_masses_unsm_h[f'mass_{mass}'] = ( + hist.Hist.new + .Reg(800,17.5,217.5,label='Invariant Mass [MeV]') + .Double() + ) + + inv_masses_unsm_h[f'mass_{mass}'].fill(tree.unc_vtx_mass*1000.) 
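
Editorial note on the polynomial-order selection above: the quantity appended to fstats in fit_with_poly is the standard nested-model F statistic for adding one polynomial parameter, F = (chi2_{n-1} - chi2_n) * (N - n - 1) / chi2_n, i.e. the chi2 improvement gained from the extra term measured against the remaining chi2 per degree of freedom of the higher-order fit. Below is a minimal standalone sketch of that test; it is not part of the patch, it assumes scipy is available, and the function name and example numbers are purely illustrative.

from scipy.stats import f

def poly_order_f_test(chi2_lower, chi2_higher, npoints, higher_order, alpha=0.05):
    """Nested F-test: does pol(higher_order) improve on pol(higher_order - 1)?"""
    ndf_higher = npoints - (higher_order + 1)          # d.o.f. of the higher-order fit
    f_stat = (chi2_lower - chi2_higher) * ndf_higher / chi2_higher
    p_value = f.sf(f_stat, 1, ndf_higher)              # chance of an improvement this large
    return f_stat, p_value, p_value < alpha            # True -> keep the extra order

# Illustrative numbers only: chi2 drops from 60.2 (pol2) to 41.5 (pol3) with 48 mass points
print(poly_order_f_test(60.2, 41.5, 48, 3))

Rather than testing against a significance threshold, fit_with_poly keeps the order at which the F statistic shows the largest drop relative to the previous order.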
+ + +# Convert Histograms to ROOT for convenient fitting +for key, histo in inv_masses_h.items(): + h = utils.cnvHistogramToROOT(histo) + h.SetName(f'{key}_smeared') + inv_masses_h[key] = h + +for key, histo in inv_masses_unsm_h.items(): + h = utils.cnvHistogramToROOT(histo) + h.SetName(f'{key}_unsmeared') + inv_masses_unsm_h[key] = h + + +# Initialize output ROOT file to save fit results +outfile = r.TFile(f'{outdir}/{outfilename}',"RECREATE") +outfile.cd() +fit_results = {} + +#======================================================================================================================================= +# CALCULATE MC SIGNAL MASS RESOLUTION WITH SMEARING +#======================================================================================================================================= + +# Run over smeared MC signal and fit +for mass, histo in inv_masses_h.items(): + print(f'Fitting {mass}') + fit_histo, params, errors, chi2ndf = gaus_fit(histo, histo.GetXaxis().GetBinLowEdge(histo.FindFirstBinAbove(0)), histo.GetXaxis().GetBinLowEdge(histo.FindLastBinAbove(0)), histo.GetMean(), histo.GetRMS(), histo.GetMaximum(), nsigma=2.0, isData=False) + + #Draw Fit result to canvas, and save to png and to root file + text = [f'\mu = {round(params[1],2)}',f'\sigma = {round(params[2],2)}',f'\chi^{2}/n.d.f = {round(chi2ndf,2)}'] + xmin = params[1] - 4.0*params[2] + xmax = params[1] + 4.0*params[2] + ymax = params[0]*1.1 + c = utils.drawTH1s([histo], histo.GetName(), drawOpts=['hist'],xrange=[xmin,xmax], yrange=[0.0,ymax],size=(2000,1500), text=text, text_pos=[0.2,0.78], line_spacing=0.05) + c.Write() + c.SaveAs(f'{outdir}/{histo.GetName()}.png') + c.Close() + #save fit results + fit_results[mass] = [fit_histo, params, errors] + +# Fit the fitted mass resolution results as a function of invariant mass +fit_masses = sorted(np.array([float(x.replace('mass_','')) for x in fit_results.keys()], dtype=float)) +fit_res = np.array([fit_results[f'mass_{int(mass)}'][1][2] for mass in fit_masses], dtype=float) +fit_errs = np.array([fit_results[f'mass_{int(mass)}'][2][2] for mass in fit_masses], dtype=float) +zeros = np.array([0.0 for mass in fit_masses], dtype=float) + +# Make TGraphErrors to fit with polynomial and get mass resolution as function of mass +massRes_smeared_ge = r.TGraphErrors(len(fit_masses), np.array(fit_masses), np.array(fit_res), np.array(zeros), np.array(fit_errs)) +massRes_smeared_ge.GetXaxis().SetTitle('Invariant Mass [MeV]') +massRes_smeared_ge.GetYaxis().SetTitle('Invariant Mass Resolution [MeV]') +massRes_smeared_ge.GetYaxis().SetTitleOffset(1.) 
+fitResult_smeared = fit_with_poly(massRes_smeared_ge)
+text_smeared = [f'Smeared: {fitResult_smeared.Parameters()}']
+c_smeared = utils.drawTGraphs([massRes_smeared_ge], 'smeared_mc', drawOpts=['AP'], text=text_smeared,
+        text_pos=[0.2,0.8])
+c_smeared.SaveAs(f'{outdir}/invariant_mass_resolution_function_smeared.png')
+c_smeared.Write()
+
+#=======================================================================================================================================
+# CALCULATE MC SIGNAL MASS RESOLUTION WITHOUT SMEARING
+#=======================================================================================================================================
+
+# Run over unsmeared MC signal and fit
+for mass, histo in inv_masses_unsm_h.items():
+    print(f'Fitting {mass}')
+    fit_histo, params, errors, chi2ndf = gaus_fit(histo, histo.GetXaxis().GetBinLowEdge(histo.FindFirstBinAbove(0)), histo.GetXaxis().GetBinLowEdge(histo.FindLastBinAbove(0)), histo.GetMean(), histo.GetRMS(), histo.GetMaximum(), nsigma=2.0, isData=False)
+
+    # Draw Fit result to canvas, and save to png and to root file
+    text = [f'\mu = {round(params[1],2)}',f'\sigma = {round(params[2],2)}',f'\chi^{2}/n.d.f = {round(chi2ndf,2)}']
+    xmin = params[1] - 4.0*params[2]
+    xmax = params[1] + 4.0*params[2]
+    ymax = params[0]*1.1
+    c = utils.drawTH1s([histo], f'{histo.GetName()}_unsmeared', drawOpts=['hist'],xrange=[xmin,xmax], yrange=[0.0,ymax],size=(2000,1500), text=text, text_pos=[0.2,0.78], line_spacing=0.05)
+    c.Write()
+    c.SaveAs(f'{outdir}/{histo.GetName()}.png')
+    c.Close()
+
+    # Save fit results
+    fit_results[mass] = [fit_histo, params, errors]
+
+# Fit the fitted mass resolution results as a function of invariant mass
+fit_masses = sorted(np.array([float(x.replace('mass_','')) for x in fit_results.keys()], dtype=float))
+fit_res = np.array([fit_results[f'mass_{int(mass)}'][1][2] for mass in fit_masses], dtype=float)
+fit_errs = np.array([fit_results[f'mass_{int(mass)}'][2][2] for mass in fit_masses], dtype=float)
+zeros = np.array([0.0 for mass in fit_masses], dtype=float)
+
+# Make TGraphErrors to fit with polynomial and get mass resolution as function of mass
+massRes_unsmeared_ge = r.TGraphErrors(len(fit_masses), np.array(fit_masses), np.array(fit_res), np.array(zeros), np.array(fit_errs))
+massRes_unsmeared_ge.GetXaxis().SetTitle('Invariant Mass [MeV]')
+massRes_unsmeared_ge.GetYaxis().SetTitle('Invariant Mass Resolution [MeV]')
+massRes_unsmeared_ge.GetYaxis().SetTitleOffset(1.0)
+fitResult_unsmeared = fit_with_poly(massRes_unsmeared_ge)
+text_unsmeared = [f'Unsmeared: {fitResult_unsmeared.Parameters()}']
+c_unsmeared = utils.drawTGraphs([massRes_unsmeared_ge], 'unsmeared_mc', drawOpts=['AP'], text=text_unsmeared,
+        text_pos=[0.2,0.8])
+print(fitResult_unsmeared.Parameters())
+c_unsmeared.SaveAs(f'{outdir}/invariant_mass_resolution_function_unsmeared.png')
+c_unsmeared.Write()
+
+
+#=======================================================================================================================================
+# SUMMARY PLOT
+#=======================================================================================================================================
+
+colors = utils.getColorsHPS()
+#format smeared
+utils.format_th1(massRes_smeared_ge, title='Smeared MC', linecolor=r.kBlack, markerstyle=20, markercolor=r.kBlack)
+massRes_smeared_ge.SetMarkerSize(2)
+massRes_smeared_ge.GetYaxis().SetTitleOffset(0.5)
+massRes_smeared_ge.GetYaxis().SetRangeUser(0.0,8.0)
+#format unsmeared
+utils.format_th1(massRes_unsmeared_ge, title='Un-smeared MC', linecolor=r.kBlack, markerstyle=4, markercolor=r.kBlack) +massRes_unsmeared_ge.SetMarkerSize(2) +massRes_unsmeared_ge.GetListOfFunctions().At(0).SetLineColor(colors[1]) + + +c = r.TCanvas('c','c',2500,1500) +c.cd() +massRes_smeared_ge.Draw('AP') +massRes_unsmeared_ge.Draw('PSAME') +c.Draw() +legend = utils.buildLegend([massRes_smeared_ge, massRes_unsmeared_ge], position=(0.3,0.7, 0.4, 0.8), titles=['Smeared MC', 'Un-smeared MC']) +legend.SetTextSize(0.03) +legend.Draw() +c.SaveAs(f'{outdir}/invariant_mass_resolution_fits.png') + diff --git a/plotUtils/simps/mass_resolution/moller_ana.py b/plotUtils/simps/mass_resolution/moller_ana.py new file mode 100644 index 000000000..93bb8d248 --- /dev/null +++ b/plotUtils/simps/mass_resolution/moller_ana.py @@ -0,0 +1,364 @@ +#!/usr/bin/python3 +""" +This script selects Moller events in MC and data, plots the invariant mass distributions, and fits them with a Gaussian +fit function to calculate the Moller mass resolutions. +This script compares data, unsmeared MC, and smeared MC. +""" +import os +import awkward as ak +import numpy as np +import hist +from hist import Hist +import uproot +import math +import ROOT as r +import matplotlib as mpl +import matplotlib.pyplot as plt +import sys + +# SIMP tools defined in hpstr +hpstr_base = os.getenv('HPSTR_BASE') +sys.path.append(f'{hpstr_base}/plotUtils/simps') +import simp_plot_utils as utils + + +plt.rcParams.update({'font.size': 40, # Font size for text + 'axes.titlesize': 40, # Font size for titles + 'axes.labelsize': 40, # Font size for axis labels + 'xtick.labelsize': 40, # Font size for x-axis tick labels + 'ytick.labelsize': 40, # Font size for y-axis tick labels + 'lines.linewidth':4.0, + 'legend.fontsize': 40}) # Font size for legend + +#======================================================================================================================================= +# INITIALIZE +#======================================================================================================================================= +# --outdir: Specify output directory. + +import argparse +parser = argparse.ArgumentParser(description='Process some inputs.') +parser.add_argument('--outdir', type=str, default='moller_mass_fits') +args = parser.parse_args() +outdir = args.outdir + +#======================================================================================================================================= +# FUNCTIONS +#======================================================================================================================================= + +def load_data(filepath, selection, cut_expression = None, expressions=None): + with uproot.open(filepath) as f: + events = f[f'{selection}/{selection}_tree'].arrays( + cut=cut_expression, + expressions = expressions + ) + return events + +def TrackFiducial(array, isData=False, isMC=False): + """ + The Moller track fiducial regions are defined in the 2016 Bump Hunt analysis note. + The selection is slightly different between MC and data. 
+ """ + ele_fid = ak.full_like(array.unc_vtx_ele_track_ecal_y, False, dtype=bool) + pos_fid = ak.full_like(array.unc_vtx_pos_track_ecal_y, False, dtype=bool) + + if isData: + ele_condition_1 = ( + (array.unc_vtx_ele_track_ecal_y > 0) & + (array.unc_vtx_ele_track_ecal_y < 42.0) & + (array.unc_vtx_ele_track_ecal_y < 13 - 0.26 * array.unc_vtx_ele_track_ecal_x) & + (array.unc_vtx_ele_track_ecal_y > 18 - 0.08 * array.unc_vtx_ele_track_ecal_x) & + (((array.unc_vtx_ele_track_ecal_x > -125) & (array.unc_vtx_ele_track_ecal_x < -95)) | + ((array.unc_vtx_ele_track_ecal_x > -85) & (array.unc_vtx_ele_track_ecal_x < -55))) + ) + ele_condition_2 = ( + (array.unc_vtx_ele_track_ecal_y < 0) & + (array.unc_vtx_ele_track_ecal_y < -23) & + (array.unc_vtx_ele_track_ecal_y < -15 + 0.08 * array.unc_vtx_ele_track_ecal_x) & + (array.unc_vtx_ele_track_ecal_y > -18 + 0.22 * array.unc_vtx_ele_track_ecal_x) & + (((array.unc_vtx_ele_track_ecal_x > -75) & (array.unc_vtx_ele_track_ecal_x < -45)) | + ((array.unc_vtx_ele_track_ecal_x > -110) & (array.unc_vtx_ele_track_ecal_x < -95))) + ) + ele_fid = ele_condition_1 | ele_condition_2 + + pos_condition_1 = ( + (array.unc_vtx_pos_track_ecal_y > 0) & + (array.unc_vtx_pos_track_ecal_y < 42.0) & + (array.unc_vtx_pos_track_ecal_y < 13 - 0.26 * array.unc_vtx_pos_track_ecal_x) & + (array.unc_vtx_pos_track_ecal_y > 18 - 0.08 * array.unc_vtx_pos_track_ecal_x) & + (((array.unc_vtx_pos_track_ecal_x > -125) & (array.unc_vtx_pos_track_ecal_x < -95)) | + ((array.unc_vtx_pos_track_ecal_x > -85) & (array.unc_vtx_pos_track_ecal_x < -55))) + ) + pos_condition_2 = ( + (array.unc_vtx_pos_track_ecal_y < 0) & + (array.unc_vtx_pos_track_ecal_y < -23) & + (array.unc_vtx_pos_track_ecal_y < -15 + 0.08 * array.unc_vtx_pos_track_ecal_x) & + (array.unc_vtx_pos_track_ecal_y > -18 + 0.22 * array.unc_vtx_pos_track_ecal_x) & + (((array.unc_vtx_pos_track_ecal_x > -75) & (array.unc_vtx_pos_track_ecal_x < -45)) | + ((array.unc_vtx_pos_track_ecal_x > -110) & (array.unc_vtx_pos_track_ecal_x < -95))) + ) + pos_fid = pos_condition_1 | pos_condition_2 + + elif isMC: + ele_condition_1 = ( + (array.unc_vtx_ele_track_ecal_y > 0) & + (array.unc_vtx_ele_track_ecal_y > 23) & + (array.unc_vtx_ele_track_ecal_y > 15 - 0.1 * array.unc_vtx_ele_track_ecal_x) & + (array.unc_vtx_ele_track_ecal_y < 12 - 0.3 * array.unc_vtx_ele_track_ecal_x) & + (((array.unc_vtx_ele_track_ecal_x > -75) & (array.unc_vtx_ele_track_ecal_x < -50)) | + ((array.unc_vtx_ele_track_ecal_x > -130) & (array.unc_vtx_ele_track_ecal_x < -95))) + ) + ele_condition_2 = ( + (array.unc_vtx_ele_track_ecal_y < 0) & + (array.unc_vtx_ele_track_ecal_y < -22) & + (array.unc_vtx_ele_track_ecal_y < -15 + 0.1 * array.unc_vtx_ele_track_ecal_x) & + (array.unc_vtx_ele_track_ecal_y > -15 + 0.25 * array.unc_vtx_ele_track_ecal_x) & + (((array.unc_vtx_ele_track_ecal_x > -120) & (array.unc_vtx_ele_track_ecal_x < -94)) | + ((array.unc_vtx_ele_track_ecal_x > -75) & (array.unc_vtx_ele_track_ecal_x < -50))) + ) + ele_fid = ele_condition_1 | ele_condition_2 + + pos_condition_1 = ( + (array.unc_vtx_pos_track_ecal_y > 0) & + (array.unc_vtx_pos_track_ecal_y > 23) & + (array.unc_vtx_pos_track_ecal_y > 15 - 0.1 * array.unc_vtx_pos_track_ecal_x) & + (array.unc_vtx_pos_track_ecal_y < 12 - 0.3 * array.unc_vtx_pos_track_ecal_x) & + (((array.unc_vtx_pos_track_ecal_x > -75) & (array.unc_vtx_pos_track_ecal_x < -50)) | + ((array.unc_vtx_pos_track_ecal_x > -130) & (array.unc_vtx_pos_track_ecal_x < -95))) + ) + pos_condition_2 = ( + (array.unc_vtx_pos_track_ecal_y < 0) & + 
(array.unc_vtx_pos_track_ecal_y < -22) & + (array.unc_vtx_pos_track_ecal_y < -15 + 0.1 * array.unc_vtx_pos_track_ecal_x) & + (array.unc_vtx_pos_track_ecal_y > -15 + 0.25 * array.unc_vtx_pos_track_ecal_x) & + (((array.unc_vtx_pos_track_ecal_x > -120) & (array.unc_vtx_pos_track_ecal_x < -94)) | + ((array.unc_vtx_pos_track_ecal_x > -75) & (array.unc_vtx_pos_track_ecal_x < -50))) + ) + pos_fid = pos_condition_1 | pos_condition_2 + + array['ele_fid'] = ele_fid + array['pos_fid'] = pos_fid + + filtered_array = array[ele_fid & pos_fid] + return filtered_array + +def fit_w_gaussian(histo, nsigma=2.0): + + #Seed initial fit with a simple gaussian fit over xmin and xmax + xmin = histo.GetXaxis().GetBinLowEdge(histo.FindFirstBinAbove(0)) + xmax = histo.GetXaxis().GetBinLowEdge(histo.FindLastBinAbove(0)) + fitfunc = r.TF1("gaus","gaus", xmin, xmax) + fitRes = histo.Fit(fitfunc,"QMES") + params = fitRes.Parameters() + errors = fitRes.Errors() + chi2 = fitRes.Chi2() + ndf = fitRes.Ndf() + + #Iterate fit by randomly seeding sigma, keep fit with best chi2 + best_chi2ndf = chi2/ndf + best_params = params + best_errors = errors + for i in range(80): + norm = best_params[0] + mu = best_params[1] + sigma = np.random.uniform(100,200)*0.01 + + #establish new fit range + xmin = mu - nsigma*sigma + xmax = mu + nsigma*sigma + fitfunc.SetParameter(0, norm) + fitfunc.SetParameter(1, mu) + fitfunc.SetParameter(2, sigma) + fitRes = histo.Fit(fitfunc,"QMES","", xmin, xmax) + + params = fitRes.Parameters() + errors = fitRes.Errors() + chi2ndf = fitRes.Chi2()/fitRes.Ndf() + + if chi2ndf < best_chi2ndf: + best_params = params + best_chi2ndf = chi2ndf + best_errors = errors + + #Refit with best parameters + fitfunc.FixParameter(0, best_params[0]) + fitfunc.FixParameter(1, best_params[1]) + fitfunc.FixParameter(2, best_params[2]) + sigma = best_params[2] + xmin = mu - nsigma*sigma + xmax = mu + nsigma*sigma + histo.Fit(fitfunc,"QMES","", xmin, xmax) + print(fitfunc.GetParError(2)) + return histo, best_params, best_errors, best_chi2ndf + +#======================================================================================================================================= +# LOAD DATA +#======================================================================================================================================= + +samples = {} +branches = ["unc_vtx_mass","unc_vtx_psum", "unc_vtx_ele_track_t", "unc_vtx_pos_track_t","unc_vtx_ele_track_ecal_x", + "unc_vtx_ele_track_ecal_y", "unc_vtx_pos_track_ecal_x", "unc_vtx_pos_track_ecal_y", + "unc_vtx_ele_clust_x", "unc_vtx_ele_clust_y", "unc_vtx_pos_clust_x", "unc_vtx_pos_clust_y", + "unc_vtx_ele_track_px","unc_vtx_ele_track_py","unc_vtx_pos_track_px","unc_vtx_pos_track_py", + "unc_vtx_ele_track_pz", "unc_vtx_pos_track_pz", + "unc_vtx_ele_track_nhits", "unc_vtx_pos_track_nhits", + "unc_vtx_px", "unc_vtx_py", "unc_vtx_pz"] + +# Read Data Mollers Unconstrained +infile = '/sdf/group/hps/user-data/alspellm/run/new_run_scripts/mollers/hadd/hadd-sample0-moller-ana.root' +cut_expression = ('( (unc_vtx_ele_track_t > -3.0) & (unc_vtx_pos_track_t > -3.0) & (unc_vtx_ele_track_t < 2.5) & (unc_vtx_pos_track_t < 2.5) & (unc_vtx_psum > 2.1) & (unc_vtx_psum < 2.45))') +selection = 'vtxana_Tight_nocuts' +samples['data'] = load_data(infile,selection, cut_expression=cut_expression, expressions=branches) +samples['data']['vertex_psum'] = np.sqrt(np.square(samples['data'].unc_vtx_px) + np.square(samples['data'].unc_vtx_py) + np.square(samples['data'].unc_vtx_pz)) + +# Read MC mollers unconstrained 
no smearing +infile = '/sdf/group/hps/user-data/alspellm/run/new_run_scripts/mollers/hadd/hadd-molv4-beamv6-PhysicsRun2016-Pass2_iss650_singles0_ana.root' +cut_expression = ('( abs(unc_vtx_ele_track_t - unc_vtx_pos_track_t) < 2.5) & (unc_vtx_psum > 2.1) & (unc_vtx_psum < 2.45) ') +samples['mc'] = load_data(infile,selection, cut_expression = cut_expression, expressions=branches) +samples['mc']['vertex_psum'] = np.sqrt(np.square(samples['mc'].unc_vtx_px) + np.square(samples['mc'].unc_vtx_py) + np.square(samples['mc'].unc_vtx_pz)) + +# Read MC mollers unconstrained with smearing +infile = '/sdf/group/hps/user-data/alspellm/run/new_run_scripts/mollers/hadd/hadd-molv4-beamv6_HPS-PhysicsRun2016-Pass2_iss650_singles0_ana_smeared_topbot_corr.root' +samples['mc_smear'] = load_data(infile,selection, cut_expression=cut_expression, expressions=branches) +samples['mc_smear']['vertex_psum'] = np.sqrt(np.square(samples['mc_smear'].unc_vtx_px) + np.square(samples['mc_smear'].unc_vtx_py) + np.square(samples['mc_smear'].unc_vtx_pz)) + + +# Apply fiducial cuts +samples['data'] = TrackFiducial(samples['data'], isData=True) +samples['data_cons'] = TrackFiducial(samples['data_cons'], isData=True) +samples['mc'] = TrackFiducial(samples['mc'], isData=False, isMC=True) +samples['mc_smear'] = TrackFiducial(samples['mc_smear'], isData=False, isMC=True) + +#Cut on angular relationship? +samples['data']['theta_1'] = np.arctan( np.sqrt(np.square(samples['data'].unc_vtx_ele_track_py) + np.square(samples['data'].unc_vtx_ele_track_px))/samples['data'].unc_vtx_ele_track_pz ) +samples['data']['theta_2'] = np.arctan( np.sqrt(np.square(samples['data'].unc_vtx_pos_track_py) + np.square(samples['data'].unc_vtx_pos_track_px))/samples['data'].unc_vtx_pos_track_pz ) +samples['data_cons']['theta_1'] = np.arctan( np.sqrt(np.square(samples['data_cons'].unc_vtx_ele_track_py) + np.square(samples['data_cons'].unc_vtx_ele_track_px))/samples['data_cons'].unc_vtx_ele_track_pz ) +samples['data_cons']['theta_2'] = np.arctan( np.sqrt(np.square(samples['data_cons'].unc_vtx_pos_track_py) + np.square(samples['data_cons'].unc_vtx_pos_track_px))/samples['data_cons'].unc_vtx_pos_track_pz ) +samples['mc']['theta_1'] = np.arctan( np.sqrt(np.square(samples['mc'].unc_vtx_ele_track_py) + np.square(samples['mc'].unc_vtx_ele_track_px))/samples['mc'].unc_vtx_ele_track_pz ) +samples['mc']['theta_2'] = np.arctan( np.sqrt(np.square(samples['mc'].unc_vtx_pos_track_py) + np.square(samples['mc'].unc_vtx_pos_track_px))/samples['mc'].unc_vtx_pos_track_pz ) +samples['mc_smear']['theta_1'] = np.arctan( np.sqrt(np.square(samples['mc_smear'].unc_vtx_ele_track_py) + np.square(samples['mc_smear'].unc_vtx_ele_track_px))/samples['mc_smear'].unc_vtx_ele_track_pz ) +samples['mc_smear']['theta_2'] = np.arctan( np.sqrt(np.square(samples['mc_smear'].unc_vtx_pos_track_py) + np.square(samples['mc_smear'].unc_vtx_pos_track_px))/samples['mc_smear'].unc_vtx_pos_track_pz ) + +#======================================================================================================================================= +# PLOT INVARIANT MASS +#======================================================================================================================================= + +# Initialize invariant mass histograms +invm_h = ( + hist.Hist.new + .StrCategory(list(samples.keys()), name='sel') + .Reg(100, 30.0, 70,label='Invariant Mass [MeV]') + .Double() +) + +# Fill histograms +invm_h.fill('data', samples['data'].unc_vtx_mass*1000.)#, weight = 1./len(samples['data'].unc_vtx_mass)) 
+invm_h.fill('data_cons', samples['data_cons'].unc_vtx_mass*1000.)#, weight = 1./len(samples['data_cons'].unc_vtx_mass)) +invm_h.fill('mc', samples['mc'].unc_vtx_mass*1000.)#, weight = 0.7/len(samples['mc'].unc_vtx_mass)) +invm_h.fill('mc_smear', samples['mc_smear'].unc_vtx_mass*1000.)#, weight = 1.7/len(samples['mc_smear'].unc_vtx_mass)) + +# Plot invariant mass histograms +fig, ax = plt.subplots(figsize=(25,15)) +invm_h.plot() +plt.legend() +plt.xlabel('Invariant Mass [MeV]') + + +# Plot showing fiducial cuts +fig, ax = plt.subplots(figsize=(25,15)) +plt.scatter(samples['mc'].unc_vtx_ele_track_ecal_x, samples['mc'].unc_vtx_ele_track_ecal_y) +plt.scatter(samples['mc'].unc_vtx_pos_track_ecal_x, samples['mc'].unc_vtx_pos_track_ecal_y) +plt.xlabel('Track at Ecal x [mm]') +plt.ylabel('Track at Ecal y [mm]') +# Generate x data points +x = np.linspace(-140, -40, 1000) + +# Data Cuts +# Positive Y Region +y1_data_pos = 13 - 0.26 * x +y2_data_pos = 18 - 0.08 * x + +# Negative Y Region +y1_data_neg = -15 + 0.08 * x +y2_data_neg = -18 + 0.22 * x +# Plot Data Cuts +plt.plot(x, y1_data_pos, label='Data Cut y < 13 - 0.26 * x',color='black') +plt.plot(x, y2_data_pos, label='Data Cut y > 18 - 0.08 * x',color='black') +plt.plot(x, y1_data_neg, label='Data Cut y < -15 + 0.08 * x',color='black') +plt.plot(x, y2_data_neg, label='Data Cut y > -18 + 0.22 * x',color='black') +plt.text(-100,0,'Data') +plt.xlim(-160,-35) +plt.ylim(-50,50) + + +# Plot angles theta1 and theta2 around beam axis +# Possibly useful to get higher purity Moller sample +fig, ax = plt.subplots(figsize=(25,15)) +plt.scatter(samples['mc'].theta_1, samples['mc'].theta_2) +plt.xlabel('arctan(sqrt(px^2+py^2)/pz) track 1') +plt.ylabel('arctan(sqrt(px^2+py^2)/pz) track 2') +plt.plot([0.02, 0.05],[0.05,0.02], color='red') +plt.plot([0.02, 0.058],[0.058,0.02], color='red') +coefficients = np.polyfit([0.02, 0.05],[0.05,0.02], 1) +slope, intercept = coefficients +coefficients = np.polyfit([0.02, 0.058],[0.058,0.02], 1) +slope, intercept = coefficients + + +fig, ax = plt.subplots(figsize=(25,15)) +plt.scatter(samples['data'].theta_1, samples['data'].theta_2) +plt.xlabel('arctan(sqrt(px^2+py^2)/pz) track 1') +plt.ylabel('arctan(sqrt(px^2+py^2)/pz) track 2') +plt.plot([0.02, 0.05],[0.05,0.02], color='red') +plt.plot([0.02, 0.058],[0.058,0.02], color='red') +coefficients = np.polyfit([0.02, 0.05],[0.05,0.02], 1) +slope, intercept = coefficients +coefficients = np.polyfit([0.02, 0.058],[0.058,0.02], 1) +slope, intercept = coefficients + +#======================================================================================================================================= +# FIT MOLLER MASS +#======================================================================================================================================= + +# Convert invariant mass histograms to ROOT histograms +moller_masses_h = {} +for sname, sample in samples.items(): + h = utils.cnvHistogramToROOT(invm_h[sname,:]) + moller_masses_h[sname] = h + + +# Fit the data Moller mass peak +histo = moller_masses_h['data'] +fit_histo, params, errors, chi2ndf = fit_w_gaussian(histo, nsigma=2.0) + +# Plot data fit +text = [f'\mu = {round(params[1],2)} \pm {round(errors[1],3)}',f'\sigma = {round(params[2],2)} \pm {round(errors[2],3)}',f'\chi^{2}/n.d.f = {round(chi2ndf,4)}'] +xmin = params[1] - 4.0*params[2] +xmax = params[1] + 4.0*params[2] +ymax = params[0]*1.1 +c = utils.drawTH1s([histo], histo.GetName(), drawOpts=['hist'],xrange=[xmin,xmax], yrange=[0.0,ymax],size=(2040,1080), text=text, 
text_pos=[0.2,0.78], line_spacing=0.05) +c.SaveAs('moller_fit_data.png') + +# Fit the MC Moller mass peak without FEE calibrated momentum smearing +histo = moller_masses_h['mc'] +fit_histo, params, errors, chi2ndf = fit_w_gaussian(histo, nsigma=2.0) + +# Plot MC Moller unsmeared fit +text = [f'\mu = {round(params[1],2)} \pm {round(errors[1],3)}',f'\sigma = {round(params[2],2)} \pm {round(errors[2],3)}',f'\chi^{2}/n.d.f = {round(chi2ndf,4)}'] +xmin = params[1] - 4.0*params[2] +xmax = params[1] + 4.0*params[2] +ymax = params[0]*1.1 +c = utils.drawTH1s([histo], histo.GetName(), drawOpts=['hist'],xrange=[xmin,xmax], yrange=[0.0,ymax],size=(2040,1080), text=text, text_pos=[0.2,0.78], line_spacing=0.05) +c.SaveAs('moller_fit_mc_unsmeared.png') + + +# Fit the MC Moller mass peak WITH FEE calibrated momentum smearing applied +histo = moller_masses_h['mc_smear'] +fit_histo, params, errors, chi2ndf = fit_w_gaussian(histo, nsigma=2.0) + +# Plot MC Moller SMEARED fit +text = [f'\mu = {round(params[1],2)} \pm {round(errors[1],3)}',f'\sigma = {round(params[2],2)} \pm {round(errors[2],3)}',f'\chi^{2}/n.d.f = {round(chi2ndf,4)}'] +xmin = params[1] - 4.0*params[2] +xmax = params[1] + 4.0*params[2] +ymax = params[0]*1.1 +c = utils.drawTH1s([histo], histo.GetName(), drawOpts=['hist'],xrange=[xmin,xmax], yrange=[0.0,ymax],size=(2040,1080), text=text, text_pos=[0.2,0.78], line_spacing=0.05) +c.SaveAs('moller_fit_mc_smeared.png') diff --git a/plotUtils/simps/simp_plot_utils.py b/plotUtils/simps/simp_plot_utils.py new file mode 100644 index 000000000..208155460 --- /dev/null +++ b/plotUtils/simps/simp_plot_utils.py @@ -0,0 +1,597 @@ +import ROOT as r +import numpy as np +from array import array +import copy +import uproot + +def getColorsHPS(): + colors = [r.kBlue+2, r.kCyan+2, r.kRed+2, r.kOrange+10, r.kYellow+2, r.kGreen-1, r.kAzure-2, r.kGreen-8, r.kOrange+3, r.kYellow+2, r.kRed+2, r.kBlue+2, r.kGreen-8, r.kOrange+3, r.kYellow+2, r.kRed+2, r.kBlue+2, r.kGreen-8, r.kOrange+3, r.kYellow+2, r.kRed+2, r.kBlue+2, r.kGreen-8, r.kOrange+3, r.kYellow+2, r.kRed+2, r.kBlue+2, r.kGreen-8, r.kOrange+3] + return colors + +def plot_zbi_multigraph(graph_nsig, graph_nbkg, graph_zbi, canvas_name, leftyaxis_title, rightyaxis_title): + # Create TMultiGraph to hold all TGraphs + multi_graph = r.TMultiGraph() + + # Add nsig and nbkg graphs to the multi_graph (use left y-axis) + multi_graph.Add(graph_nsig) + multi_graph.Add(graph_nbkg) + + # Create a TCanvas to draw the multi_graph + canvas = r.TCanvas(canvas_name, canvas_name, 2040, 1080) + + # Draw the multi_graph with left y-axis (default) + multi_graph.Draw("AP") + + c.Draw() + # Set axis labels and titles for the left y-axis + multi_graph.GetYaxis().SetTitle(leftyaxis_title) + multi_graph.GetYaxis().SetTitleOffset(1.2) + + # Create a second y-axis for the ZBi graph + axis_zbi = multi_graph.GetHistogram().GetXaxis() # Copy the x-axis + axis_zbi.SetTitle(rightyaxis_title) # Set title for the right y-axis + axis_zbi.SetTitleOffset(1.2) # Adjust title offset for the right y-axis + axis_zbi.SetLabelOffset(999) # Hide labels for the right y-axis + + # Draw the ZBi graph with the right y-axis + graph_zbi.SetLineColor(r.kRed) # Set color for better visibility + graph_zbi.Draw("same") + graph_zbi.SetMarkerColor(r.kRed) # Set marker color + + # Set the second y-axis for the ZBi graph + multi_graph.GetHistogram().GetListOfFunctions().Add(axis_zbi) + + # Update the canvas + canvas.Update() + + # Return the canvas + return canvas + +#def SetMyStyle(tsize=0.025, tzsize=0.025, font=42, 
setOptTitle=0, setOptStat=0, setOptFit=0): +def SetMyStyle(tsize=0.05, tzsize=0.05, font=42, setOptTitle=0, setOptStat=0, setOptFit=0): + print("SETTING MY STYLE") + + colors = getColorsHPS() + r.gROOT.SetBatch(1) + + myStyle = r.TStyle("myStyle", "my style") + + # Set your custom attributes here + myStyle.SetOptTitle(setOptTitle) + myStyle.SetOptStat(setOptStat) + myStyle.SetOptFit(setOptFit) + myStyle.SetTitleFont(font) + myStyle.SetTitleSize(tsize) + #myStyle.SetTitleX(0.5) + #myStyle.SetTitleY(0.98) + + #Set legend text size + myStyle.SetLegendTextSize(0.02) + + # Set the title text color to black + myStyle.SetTitleTextColor(r.kBlack) + + # use plain black on white colors + icol = 0 + myStyle.SetFrameBorderMode(icol) + myStyle.SetCanvasBorderMode(icol) + myStyle.SetPadBorderMode(icol) + myStyle.SetPadColor(icol) + myStyle.SetCanvasColor(icol) + myStyle.SetStatColor(icol) + + # set the paper & margin sizes + myStyle.SetPaperSize(20, 26) + myStyle.SetPadTopMargin(0.10) + myStyle.SetPadRightMargin(0.05) + myStyle.SetPadRightMargin(0.10) + myStyle.SetPadBottomMargin(0.15) + myStyle.SetPadLeftMargin(0.10) + + myStyle.SetTextSize(tsize) + myStyle.SetLabelFont(font, "x") + myStyle.SetTitleFont(font, "x") + myStyle.SetLabelFont(font, "y") + myStyle.SetTitleFont(font, "y") + myStyle.SetLabelFont(font, "z") + myStyle.SetTitleFont(font, "z") + + myStyle.SetLabelSize(tsize, "x") + myStyle.SetTitleSize(tsize, "x") + myStyle.SetLabelSize(tsize, "y") + myStyle.SetTitleSize(tsize, "y") + myStyle.SetLabelSize(tzsize, "z") + myStyle.SetTitleSize(tzsize, "z") + + myStyle.SetTitleOffset(1.25, "y") + myStyle.SetTitleOffset(1.5, "x") + + #use bold lines and markers + myStyle.SetMarkerSize(1.0) + myStyle.SetMarkerStyle(8) + myStyle.SetMarkerColor(1) + myStyle.SetLineColor(1) + myStyle.SetHistLineWidth(3) + #myStyle.SetLineStyleString(2, "[12 12]") # postscript dashes + + # put tick marks on top and RHS of plots + #myStyle.SetPadTickX(1) + #myStyle.SetPadTickY(1) + + r.gROOT.SetStyle("myStyle") + r.gROOT.ForceStyle() + + NRGBs = 5 + NCont = 255 + + stops = array("d", [0.00, 0.34, 0.61, 0.84, 1.00]) + red = array("d", [0.00, 0.00, 0.87, 1.00, 0.51]) + green = array("d", [0.00, 0.81, 1.00, 0.20, 0.00]) + blue = array("d", [0.51, 1.00, 0.12, 0.00, 0.00]) + r.TColor.CreateGradientColorTable(NRGBs, stops, red, green, blue, NCont) + + return myStyle + + + +def buildLegend(graphs,titles=[], position=(0.50,0.6,0.85,0.9),clear_legend=True, text_size=0.030, entry_format=None): + legend = r.TLegend(*position) + #legend = r.TLegend() + # Set the legend to transparent (clear) if the option is specified + if clear_legend: + legend.SetFillStyle(0) + legend.SetFillColor(0) + legend.SetLineColor(0) + legend.SetBorderSize(0) + legend.SetTextSize(text_size) + + for i,graph in enumerate(graphs): + if entry_format is None: + if len(titles) < 1: + legend.AddEntry(graph, graph.GetTitle()) + else: + legend.AddEntry(graph, titles[i]) + else: + if len(titles) < 1: + legend.AddEntry(graph, graph.GetTitle(),"%s"%(entry_format[i])) + else: + legend.AddEntry(graph, titles[i],"%s"%(entry_format[i])) + return legend + + +def setTitle(hist, title=None, titlex=None, titley=None, showTitle=False): + if title is not None: + hist.SetTitle(title) + if titlex is not None: + hist.GetXaxis().SetTitle(titlex) + if titley is not None: + hist.GetYaxis().SetTitle(titley) + if showTitle: + r.gStyle.SetOptTitle(1) + else: + r.gStyle.SetOptTitle(0) + +def drawStatsBox(hist, statsPos=[0.7,0.9,0.7,0.9]): + statsbox = hist.FindObject("stats") + 
statsbox.SetX1NDC(statsPos[0]) + statsbox.SetX2NDC(statsPos[1]) + statsbox.SetY1NDC(statsPos[2]) + statsbox.SetY2NDC(statsPos[3]) + +def drawTH2(hist, canvas_name, drawOpt='colz', xrange = (None,None), yrange=(None,None),rebinx=None, + rebiny=None, size=(2200,1600), logX=False, logY=False, logZ=False, save=False, outdir='.', + drawStats=False, statsPos=[0.7,0.9,0.7,0.9], + text=[], text_pos = [0.2, 0.8], line_spacing=0.03, text_size=0.025, Hps=True): + + c = r.TCanvas(f'{canvas_name}',f'{canvas_name}',size[0], size[1]) + c.cd() + r.gROOT.ForceStyle() + hist.Draw(f'{drawOpt}') + r.gPad.Update() + c.UseCurrentStyle() + if rebinx is not None: + hist.RebinX(rebinx) + if rebiny is not None: + hist.RebinY(rebiny) + + if drawStats: + drawStatsBox(hist, statsPos) + else: + hist.SetStats(0) + + #Set Xrange + xmin = hist.GetXaxis().GetXmin() + xmax = hist.GetXaxis().GetXmax() + if xrange[0] is not None: + xmin = xrange[0] + if xrange[1] is not None: + xmax = xrange[1] + hist.GetXaxis().SetRangeUser(0.9*xmin,1.1*xmax) + + #Set Yrange + ymin = hist.GetYaxis().GetXmin() + ymax = hist.GetYaxis().GetXmax() + if yrange[0] is not None: + ymin = yrange[0] + if yrange[1] is not None: + ymax = yrange[1] + hist.GetYaxis().SetRangeUser(0.9*ymin,1.1*ymax) + + #Set Log + if logX: + c.SetLogx(1) + else: + c.SetLogx(0) + if logY: + c.SetLogy(1) + else: + c.SetLogy(0) + if logZ: + c.SetLogz(1) + else: + c.SetLogz(0) + + #Draw latex text + if len(text) > 0 or Hps: + drawText = insertText(text, text_pos, line_spacing, text_size, Hps) + drawText.Draw() + r.gPad.Update() + + if save: + c.SaveAs(f'{outdir}/{canvas_name}.png') + c.Close() + else: + return c + +def insertText(text=[], text_pos = [0.2, 0.98], line_spacing=0.04, text_size=0.035, Hps=True): + latex = r.TLatex() + latex.SetTextSize(text_size) + text_x = text_pos[0] + text_y = text_pos[1] + if (Hps): + latex.DrawLatexNDC(text_x, text_y,'#bf{#it{HPS}} Internal') + text_y = text_y - line_spacing + + for line in text: + latex.DrawLatexNDC(text_x, text_y,line) + text_y = text_y - line_spacing + return latex + +def drawTH1s(histograms, canvas_name, drawOpts=[], xrange = (None,None), yrange=(None,None),rebinx=None, legend=None, + rebiny=None, size=(1280,720), logX=False, logY=False, logZ=False, save=False, outdir='.', + drawStats=False, statsPos=[0.7,0.9,0.7,0.9], text=[], text_pos = [0.15, 0.8], line_spacing=0.03, text_size=0.025, Hps=True, freezeXaxis=True): + + c = r.TCanvas(f'{canvas_name}',f'{canvas_name}',size[0], size[1]) + c.cd() + r.gROOT.ForceStyle() + c.UseCurrentStyle() + + # Find the maximum x and y values among all histograms + min_x = min(h.GetBinLowEdge(h.FindFirstBinAbove(0.0)) for h in histograms) + max_x, max_y = max(h.GetBinLowEdge(h.FindLastBinAbove(0.0)) + h.GetBinWidth(0) for h in histograms), max(h.GetMaximum() for h in histograms) + min_y = 1e10 + for h in histograms: + local_miny = 1e10 + min_y_l = h.GetBinContent(h.FindFirstBinAbove(0.0)) + min_y_u = h.GetBinContent(h.FindLastBinAbove(0.0)) + if min_y_l < min_y_u: + local_miny = min_y_l + else: + local_miny = min_y_u + if local_miny < min_y: + min_y = local_miny + if(freezeXaxis == False): + h.SetAxisRange(0.95*min_x, 1.05*max_x, "X") + h.GetXaxis().SetRangeUser(0.95*min_x,1.05*max_x) + h.SetAxisRange(min_y, 1.05 * max_y, "Y") + h.GetYaxis().SetRangeUser(min_y, 1.05 * max_y) + + for i, gr in enumerate(histograms): + if xrange[0] is not None and xrange[0] is not None: + gr.GetXaxis().SetRangeUser(0.95*xrange[0], 1.05*xrange[1]) + if yrange[0] is not None: + min_y = yrange[0] + if yrange[1] 
is not None: + max_y = yrange[1] + if min_y < 0: + gr.GetYaxis().SetRangeUser(0.0, 1.05*max_y) + else: + gr.GetYaxis().SetRangeUser(0.95*min_y, 1.05*max_y) + + #Draw histograms + r.gPad.Update() + if i < 1: + gr.Draw('%s'%(drawOpts[i])) + gr.SetLineColor(colors[i]) + r.gPad.Update() + if drawStats: + drawStatsBox(gr, statsPos) + else: + gr.SetStats(0) + else: + gr.Draw('%sSAME'%(drawOpts[i])) + gr.SetLineColor(colors[i]) + r.gPad.Update() + if drawStats: + drawStatsBox(gr, statsPos) + else: + gr.SetStats(0) + + #Draw fit functions if present + if len(gr.GetListOfFunctions()) > 0: + func_name = gr.GetListOfFunctions().At(0).GetName() + func = gr.GetFunction("%s"%(func_name)) + func.Draw("%sSAME"%(drawOpts[i])) + func.SetLineColor(r.kRed) + + #Draw legend + if legend is not None: + legend.Draw() + c.Update() + + #Set Log + if logX: + c.SetLogx(1) + else: + c.SetLogx(0) + if logY: + c.SetLogy(1) + else: + c.SetLogy(0) + if logZ: + c.SetLogz(1) + else: + c.SetLogz(0) + + #Draw latex text + if len(text) > 0 or Hps: + drawText = insertText(text, text_pos, line_spacing, text_size, Hps) + drawText.Draw() + c.Update() + + c.Draw() + if save: + c.SaveAs(f'{outdir}/{canvas_name}.png') + c.Close() + else: + return c + +def drawTGraphs(graphs, canvas_name, drawOpts=[], xrange = (None,None), yrange=(None,None),rebinx=None, legend=None, + rebiny=None, size=(2200,1600), logX=False, logY=False, logZ=False, save=False, outdir='.', + drawStats=False, statsPos=[0.7,0.9,0.7,0.9], text=[], text_pos = [0.15, 0.8], line_spacing=0.03, text_size=0.025, Hps=True): + + c = r.TCanvas(f'{canvas_name}',f'{canvas_name}',size[0], size[1]) + c.cd() + r.gROOT.ForceStyle() + c.UseCurrentStyle() + + #Set Range + ymin = 9999.9 + ymax = -9999.9 + for i, gr in enumerate(graphs): + num_points = gr.GetN() + y_values = np.array([gr.GetY()[i] for i in range(num_points)]) + local_ymax = np.max(y_values) + if local_ymax > ymax: + ymax = local_ymax + local_ymin = np.min(y_values) + if local_ymin < ymin: + ymin = local_ymin + + for i, gr in enumerate(graphs): + if xrange[0] is not None and xrange[0] is not None: + gr.GetHistogram().GetXaxis().SetRangeUser(0.9*xrange[0], 1.1*xrange[1]) + if yrange[0] is not None: + ymin = yrange[0] + if yrange[1] is not None: + ymax = yrange[1] + if ymin < 0: + gr.GetHistogram().GetYaxis().SetRangeUser(1.2*ymin, 1.2*ymax) + else: + gr.GetHistogram().GetYaxis().SetRangeUser(0.8*ymin, 1.2*ymax) + + #Draw Graphs + if i < 1: + gr.Draw('A%s'%(drawOpts[i])) + r.gPad.Update() + if drawStats: + drawStatsBox(gr, statsPos) + else: + gr.GetHistogram().SetStats(0) + else: + gr.Draw('%sSAME'%(drawOpts[i])) + r.gPad.Update() + if drawStats: + drawStatsBox(gr, statsPos) + else: + gr.GetHistogram().SetStats(0) + + #Draw fit functions if present + if len(gr.GetListOfFunctions()) > 0: + func_name = gr.GetListOfFunctions().At(0).GetName() + func = gr.GetFunction("%s"%(func_name)) + func.Draw("%sSAME"%(drawOpts[i])) + + #Draw legend + if legend is not None: + legend.Draw() + c.Update() + + #Set Log + if logX: + c.SetLogx(1) + else: + c.SetLogx(0) + if logY: + c.SetLogy(1) + else: + c.SetLogy(0) + if logZ: + c.SetLogz(1) + else: + c.SetLogz(0) + + #Draw latex text + if len(text) > 0 or Hps: + drawText = insertText(text, text_pos, line_spacing, text_size, Hps) + drawText.Draw() + c.Update() + + c.Draw() + if save: + c.SaveAs(f'{outdir}/{canvas_name}.png') + c.Close() + else: + return c + +def readROOTHisto(infilename, histoname): + infile = r.TFile(f'{infilename}',"READ") + histo = copy.deepcopy(infile.Get(f'{histoname}')) + 
infile.Close() + return histo + +def cnvHistosToROOT(histos=[], tempname='temporary_uproot'): + return_histos = [] + uproot_file = uproot.recreate(f'trash_{tempname}.root') + for i, histo in enumerate(histos): + uproot_file[f'histo_{i}'] = histo + uproot_file.close() + infile = r.TFile(f'trash_{tempname}.root',"READ") + for i, histo in enumerate(histos): + return_histos.append(copy.deepcopy(infile.Get(f'histo_{i}'))) + infile.Close() + return return_histos + +def cnvHistogramToROOT(histo, tempname='temporary_uproot'): + uproot_file = uproot.recreate(f'trash_{tempname}.root') + uproot_file['histogram'] = histo + root_hist = readROOTHisto(f'trash_{tempname}.root', 'histogram;1') + uproot_file.close() + return root_hist + +def cnvHistoToROOT(histoname, histo, tempname='temporary_uproot'): + uproot_file = uproot.recreate(f'trash_{tempname}.root') + uproot_file[histoname] = histo + infile = r.TFile(f'trash_{tempname}.root',"READ") + root_hist = copy.deepcopy(infile.Get(f'{histoname}')) + infile.Close() + uproot_file.close() + return root_hist + +def quickDraw(plot, name='c', drawOpts=""): + c = r.TCanvas(name,name,1400,700) + c.cd() + plot.Draw(drawOpts) + c.Draw() + return c + +def format_th1(histo, name=None, title=None, xlabel=None, ylabel=None, linecolor=None, linewidth=None, markerstyle=None, + markercolor=None, xrang=(None, None), yrang=(None,None), rebin=None, labelsize=None, + titlesize=None, xtitleoffset=None, ytitleoffset=None): + if name: + histo.SetName(name) + if title: + histo.SetTitle(title) + if xlabel: + histo.GetXaxis().SetTitle(xlabel) + if ylabel: + histo.GetYaxis().SetTitle(ylabel) + if linecolor: + histo.SetLineColor(linecolor) + if linewidth: + histo.SetLineWidth(linewidth) + if markerstyle: + histo.SetMarkerStyle(markerstyle) + if markercolor: + histo.SetMarkerColor(markercolor) + if xrang[0]: + histo.GetXaxis().SetRangeUser(xrang[0],xrang[1]) + if yrang[0]: + histo.GetYaxis().SetRangeUser(yrang[0], yrang[1]) + if rebin: + histo.Rebin(rebin) + if labelsize: + histo.GetXaxis().SetLabelSize(labelsize) + histo.GetYaxis().SetLabelSize(labelsize) + if titlesize: + histo.GetXaxis().SetTitleSize(titlesize) + histo.GetYaxis().SetTitleSize(titlesize) + if xtitleoffset: + histo.GetXaxis().SetTitleOffset(xtitleoffset) + if ytitleoffset: + histo.GetYaxis().SetTitleOffset(ytitleoffset) + +def plot_ratio_th1s(top_plots, bot_plots, cname, size=(1800,1400), top_drawOpts="pe", bot_drawOpts="pe", + topx1=0.0, topy1=0.4, topx2=1.0, topy2=1.0, top_logY=False, + botx1=0.0, boty1=0.0, botx2=1.0, boty2=0.4, bot_logY=False, + top_botmarg=0.1, top_topmarg=None, bot_topmarg=None, bot_botmarg=0.1, leftmarg=0.1): + + c = r.TCanvas(cname, cname, size[0], size[1]) + top = r.TPad("%s_top"%(cname), "%s_top"%(cname), topx1, topy1, topx2, topy2) + top.SetBottomMargin(top_botmarg) + top.SetLeftMargin(leftmarg) + if top_topmarg: + top.SetTopMargin(top_topmarg) + top.Draw() + + + + bot = r.TPad("%s_bot"%(cname), "%s_bot"%(cname), botx1, boty1, botx2, boty2) + bot.SetBottomMargin(bot_botmarg) + bot.SetLeftMargin(leftmarg) + if bot_topmarg: + bot.SetTopMargin(bot_topmarg) + bot.Draw() + + top.cd() + for i,plot in enumerate(top_plots): + if i < 1: + plot.Draw(top_drawOpts) + else: + plot.Draw("%ssame"%(top_drawOpts)) + + r.gPad.SetLogy(top_logY) + + + bot.cd() + for i,plot in enumerate(bot_plots): + if i < 1: + plot.Draw(bot_drawOpts) + + else: + plot.Draw("%ssame"%(bot_drawOpts)) + + #Draw fit functions if present + if len(plot.GetListOfFunctions()) > 0: + func_name = plot.GetListOfFunctions().At(0).GetName() 
+ func = plot.GetFunction("%s"%(func_name)) + func.Draw("%ssame"%(bot_drawOpts)) + + return c, top, bot + + + +def draw_th1(histo, cname, size=(1800,1400), drawOpts="", logY=False, color=None): + + c = r.TCanvas(cname, cname, size[0], size[1]) + c.cd() + if color: + histo.SetLineColor(color) + histo.Draw("%s"%(drawOpts)) + + #Draw fit functions if present + if len(histo.GetListOfFunctions()) > 0: + func_name = histo.GetListOfFunctions().At(0).GetName() + func = histo.GetFunction("%s"%(func_name)) + if color: + func.SetLineColor(color) + func.Draw("same") + + if logY: + c.SetLogy(1) + + return c + +style = SetMyStyle(setOptStat=0) +colors = getColorsHPS() +
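
For reference, a minimal usage sketch of the simp_plot_utils helpers added above. This is not part of the patch: it assumes PyROOT, hist, and uproot are installed and that plotUtils/simps is on the Python path, and the toy histogram, canvas name, and output file name are all illustrative.

import os
import sys
import hist
sys.path.append(os.path.join(os.getenv('HPSTR_BASE', '.'), 'plotUtils/simps'))
import simp_plot_utils as utils

# Build and fill a toy invariant-mass histogram (values are made up)
h = hist.Hist.new.Reg(100, 0.0, 200.0, label='Invariant Mass [MeV]').Double()
h.fill([52.0, 55.0, 55.5, 56.0, 60.0])

# Convert to a ROOT TH1 through a temporary uproot file, then draw it with the shared style
rh = utils.cnvHistogramToROOT(h)
rh.SetName('toy_mass')
c = utils.drawTH1s([rh], 'toy_mass_canvas', drawOpts=['hist'], text=['toy example'])
c.SaveAs('toy_mass_canvas.png')

Importing the module runs SetMyStyle, which also switches ROOT to batch mode, so the canvas is written to disk without opening a display.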