Skip to content

Commit 73d3859

Browse files
authored
Merge pull request #48490 from AdrianoDee/fixed_data_wfs_for_testing
Fixed Number of Events Data Wfs for Testing
2 parents 8a0294f + 1efc5f0 commit 73d3859

File tree

7 files changed

+241
-257
lines changed

7 files changed

+241
-257
lines changed

Configuration/PyReleaseValidation/python/MatrixUtil.py

Lines changed: 1 addition & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -133,12 +133,7 @@ def das(self, das_options, dataset):
133133
elif not self.skimEvents:
134134
command = "dasgoclient %s --query '%s'" % (das_options, self.queries(dataset)[0])
135135
elif self.skimEvents:
136-
from os import getenv
137-
if getenv("JENKINS_PREFIX") is not None:
138-
# to be sure that whatever happens the files are only those at CERN
139-
command = "das-up-to-nevents.py -d %s -e %d -pc -l lumi_ranges.txt"%(dataset,self.events)
140-
else:
141-
command = "das-up-to-nevents.py -d %s -e %d -l lumi_ranges.txt"%(dataset,self.events)
136+
command = "das-up-to-nevents.py -d %s -e %d -l lumi_ranges.txt"%(dataset,self.events)
142137
# Run filter on DAS output
143138
if self.ib_blacklist:
144139
command += " | grep -E -v "
Lines changed: 70 additions & 107 deletions
Original file line numberDiff line numberDiff line change
@@ -1,116 +1,79 @@
11
# import the definition of the steps and input files:
22
from Configuration.PyReleaseValidation.relval_steps import *
3+
from functools import partial
34

45
# here only define the workflows as a combination of the steps defined above:
56
workflows = Matrix()
67

78
## Here we define fixed high stats data workflows
89
## not to be run as default. 10k, 50k, 150k, 250k, 500k or 1M events each
910

10-
offset_era = 0.1 # less than 10 eras per year (hopefully!)
11-
offset_pd = 0.001 # less than 100 pds per year
12-
offset_events = 0.0001 # less than 10 event setups (10k,50k,150k,250k,500k,1M)
13-
14-
## 2025
15-
base_wf = 2025.0
16-
for e_n,era in enumerate(eras_2025):
17-
for p_n,pd in enumerate(pds_2025):
18-
for e_key,evs in event_steps_dict.items():
19-
wf_number = base_wf
20-
wf_number = wf_number + offset_era * e_n
21-
wf_number = wf_number + offset_pd * p_n
22-
wf_number = wf_number + offset_events * evs
23-
wf_number = round(wf_number,6)
24-
25-
## ZeroBias has its own RECO and HARVESTING setup
26-
## ScoutingPFMonitor has its own HLT, RECO and HARVESTING setup
27-
recoharv = hlt = ''
28-
if 'ZeroBias' in pd:
29-
recoharv = 'ZB_'
30-
elif 'ScoutingPFMonitor' in pd:
31-
hlt = recoharv = 'ScoutingPFMonitor_'
32-
33-
recosetup = 'RECONANORUN3_' + recoharv + 'reHLT_2025'
34-
35-
y = str(int(base_wf))
36-
37-
## this is because ParkingDouble* PDs would end up with a too long name for the submission infrastructure
38-
step_name = 'Run' + pd.replace('ParkingDouble','Park2') + era.split('Run')[1] + '_' + e_key
39-
40-
workflows[wf_number] = ['',[step_name,'HLTDR3_' + hlt + y,'RECONANORUN3_' + recoharv + 'reHLT_'+y,'HARVESTRUN3_' + recoharv + y]]
41-
42-
## 2024
43-
base_wf = 2024.0
44-
for e_n,era in enumerate(eras_2024):
45-
for p_n,pd in enumerate(pds_2024):
46-
for e_key,evs in event_steps_dict.items():
47-
wf_number = base_wf
48-
wf_number = wf_number + offset_era * e_n
49-
wf_number = wf_number + offset_pd * p_n
50-
wf_number = wf_number + offset_events * evs
51-
wf_number = round(wf_number,6)
52-
53-
## Here we use JetMET1 PD to run the TeVJet skims
54-
skim = 'TeVJet' if pd == 'JetMET1' else ''
55-
56-
## ZeroBias has its own RECO and HARVESTING setup
57-
suff = 'ZB_' if 'ZeroBias' in pd else ''
58-
59-
# Running C,D,E with the offline GT.
60-
# Could be removed once 2025 wfs are in and we'll test the online GT with them
61-
recosetup = 'RECONANORUN3_' + suff + 'reHLT_2024'
62-
recosetup = recosetup if era[-1] > 'E' else recosetup + '_Offline'
63-
64-
y = str(int(base_wf))
65-
66-
## this is because ParkingDouble* PDs would end up with a too long name for the submission infrastructure
67-
step_name = 'Run' + pd.replace('ParkingDouble','Park2') + era.split('Run')[1] + skim + '_' + e_key
68-
69-
workflows[wf_number] = ['',[step_name,'HLTDR3_' + y,'RECONANORUN3_' + suff + 'reHLT_'+y,'HARVESTRUN3_' + suff + y]]
70-
71-
## 2023
72-
base_wf = 2023.0
73-
for e_n,era in enumerate(eras_2023):
74-
for p_n,pd in enumerate(pds_2023):
75-
for e_key,evs in event_steps_dict.items():
76-
wf_number = base_wf
77-
wf_number = wf_number + offset_era * e_n
78-
wf_number = wf_number + offset_pd * p_n
79-
wf_number = wf_number + offset_events * evs
80-
wf_number = round(wf_number,6)
81-
82-
## this is because ParkingDouble* PDs would end up with a too long name for the submission infrastructure
83-
step_name = 'Run' + pd.replace('ParkingDouble','Park2') + era.split('Run')[1] + '_' + e_key
84-
85-
y = str(int(base_wf)) + 'B' if '2023B' in era else str(int(base_wf))
86-
suff = 'ZB_' if 'ZeroBias' in step_name else ''
87-
workflows[wf_number] = ['',[step_name,'HLTDR3_' + y,'RECONANORUN3_' + suff + 'reHLT_'+y,'HARVESTRUN3_' + suff + y]]
88-
89-
## 2022
90-
base_wf = 2022.0
91-
for e_n,era in enumerate(eras_2022_1):
92-
for p_n,pd in enumerate(pds_2022_1):
93-
for e_key,evs in event_steps_dict.items():
94-
wf_number = base_wf
95-
wf_number = wf_number + offset_era * e_n
96-
wf_number = wf_number + offset_pd * p_n
97-
wf_number = wf_number + offset_events * evs
98-
wf_number = round(wf_number,6)
99-
step_name = 'Run' + pd + era.split('Run')[1] + '_' + e_key
100-
y = str(int(base_wf))
101-
suff = 'ZB_' if 'ZeroBias' in step_name else ''
102-
workflows[wf_number] = ['',[step_name,'HLTDR3_' + y,'RECONANORUN3_' + suff + 'reHLT_'+y,'HARVESTRUN3_' + suff + y]]
103-
104-
# PD names changed during 2022
105-
for e_n,era in enumerate(eras_2022_2):
106-
for p_n,pd in enumerate(pds_2022_2):
107-
for e_key,evs in event_steps_dict.items():
108-
wf_number = base_wf
109-
wf_number = wf_number + offset_era * (e_n + len(eras_2022_1))
110-
wf_number = wf_number + offset_pd * (p_n + len(pds_2022_1))
111-
wf_number = wf_number + offset_events * evs
112-
wf_number = round(wf_number,6)
113-
step_name = 'Run' + pd + era.split('Run')[1] + '_' + e_key
114-
y = str(int(base_wf))
115-
suff = 'ZB_' if 'ZeroBias' in step_name else ''
116-
workflows[wf_number] = ['',[step_name,'HLTDR3_' + y,'RECONANORUN3_' + suff + 'reHLT_'+y,'HARVESTRUN3_' + suff + y]]
11+
def run3NameMod(name):
    """Shorten 'ParkingDouble' to 'Park2' in a PD or step name.

    The full ParkingDouble* names would exceed the submission
    infrastructure's length limit for step names.
    """
    shortened = name.replace('ParkingDouble', 'Park2')
    return shortened
14+
15+
def run3HarvMod(pd):
    """Return the HARVESTING step-name prefix for a primary dataset.

    ZeroBias, ScoutingPFMonitor and ParkingDoubleMuonLowMass each have a
    dedicated HARVESTING setup; every other PD uses the default ('').
    """
    # ordered: first matching substring wins, as in the original elif chain
    special_harvesting = (
        ('ZeroBias', 'ZB_'),
        ('ScoutingPFMonitor', 'ScoutingPFMonitor_'),
        ('ParkingDoubleMuonLowMass', 'HFLAV_'),
    )
    for tag, prefix in special_harvesting:
        if tag in pd:
            return prefix
    return ''
26+
27+
def run3RecoMod(pd):
    """Return the RECO step-name prefix for a primary dataset.

    ZeroBias and ScoutingPFMonitor have their own RECO setup; any other
    PD uses the default ('').
    """
    if 'ZeroBias' in pd:
        return 'ZB_'
    return 'ScoutingPFMonitor_' if 'ScoutingPFMonitor' in pd else ''
36+
37+
def run3HLTMod(pd):
    """Return the HLT step-name prefix for a primary dataset.

    Only ScoutingPFMonitor runs a dedicated HLT setup; everything else
    uses the default ('').
    """
    return 'ScoutingPFMonitor_' if 'ScoutingPFMonitor' in pd else ''
43+
44+
def addFixedEventsWfs(years, pds, eras, offset = 0, suffreco = None, suffhlt = None, suffharv = None, namemod = None):
    """Book fixed-number-of-events data workflows for the given years.

    For every (year, era, pd, event-setup) combination a workflow number is
    derived from the year plus the module-level offsets (offset_era,
    offset_pd, offset_events) and the extra ``offset`` argument, and an
    entry is added to the module-level ``workflows`` matrix.

    Parameters:
      years    -- list of year strings, e.g. ['2025']
      pds      -- list of primary-dataset names
      eras     -- list of era identifiers, combined with the year in the step name
      offset   -- extra fractional offset, used to continue numbering across
                  two calls for the same year (see the 2022 bookings)
      suffreco/suffhlt/suffharv -- optional callables mapping a PD name to a
                  step-name prefix for the RECO/HLT/HARVESTING steps
      namemod  -- optional callable applied to the final step name
                  (e.g. shortening ParkingDouble* names)

    Returns the fractional part (wf_number - year) of the last workflow
    booked, so a follow-up call can concatenate its numbering; returns
    ``offset`` unchanged if nothing was booked (the original raised
    NameError in that case).
    """
    last_offset = offset
    for y in years:
        year = float(y)
        # enumerate instead of pds.index(pd)/eras.index(era): O(1) per
        # iteration and correct even if an entry appears twice in the list
        for e_n, era in enumerate(eras):
            for p_n, pd in enumerate(pds):
                for e_key, evs in event_steps_dict.items():

                    wf_number = year + offset_pd * p_n + offset_era * e_n + offset
                    wf_number = round(wf_number + offset_events * evs, 6)

                    # Here we customise the steps depending on the PD name
                    reco = suffreco(pd) if suffreco is not None else ''
                    harv = suffharv(pd) if suffharv is not None else ''
                    hlt = suffhlt(pd) if suffhlt is not None else ''

                    # was hard-coded to 'reHLT_2025' for every year while the HLT
                    # and HARVESTING setups used y; use the per-year setup as the
                    # pre-refactor code did -- TODO confirm no year deliberately
                    # reuses the 2025 RECO setup
                    recosetup = 'RECONANORUN3_' + reco + 'reHLT_' + y
                    harvsetup = 'HARVESTRUN3_' + harv + y
                    hltsetup = 'HLTDR3_' + hlt + y

                    # Build the step name from the raw PD and apply namemod once;
                    # the original applied namemod twice (to pd and again to the
                    # full name -- a no-op for idempotent replacements) and
                    # dropped the PD name entirely when namemod was None.
                    step_name = 'Run' + pd + y + era + '_' + e_key
                    if namemod is not None:
                        step_name = namemod(step_name)

                    workflows[wf_number] = ['', [step_name, hltsetup, recosetup, harvsetup]]
                    last_offset = wf_number - year  # to concatenate the offset

    return last_offset
73+
74+
# Bind the Run-3 PD-specific step customisations once, then book the
# fixed-events workflows year by year.
run3FixedWfs = partial(addFixedEventsWfs,suffreco = run3RecoMod, suffhlt = run3HLTMod, suffharv = run3HarvMod, namemod = run3NameMod)
run3FixedWfs(['2025'],pds_2025,eras_2025)
run3FixedWfs(['2024'],pds_2024,eras_2024)
run3FixedWfs(['2023'],pds_2023,eras_2023)
# 2022 PD names changed during the year: book the second-half (2022_2)
# workflows first and feed the returned offset into the first-half (2022_1)
# call so the two sets get non-overlapping workflow numbers.
offset_2022 = run3FixedWfs(['2022'],pds_2022_2,eras_2022_2)
run3FixedWfs(['2022'],pds_2022_1,eras_2022_1,offset = offset_2022)

Configuration/PyReleaseValidation/python/relval_standard.py

Lines changed: 43 additions & 31 deletions
Original file line numberDiff line numberDiff line change
@@ -575,40 +575,52 @@
575575
workflows[143.912] = ['',['RunUPC2024','RECODR3_2025_UPC_OXY','HARVESTDPROMPTR3']]
576576
workflows[143.921] = ['',['RunUPC2024','RECODR3_2025_OXY_SKIMIONPHYSICS0','HARVESTDPROMPTR3']]
577577

578-
## Lumi mask fixed 2024 wfs
579-
base_wf = 145.0
580-
offset_era = 0.1 # less than 10 eras per year (hopefully)
581-
offset_pd = 0.001 # less than 100 pds per year
582-
583-
for e_n,era in enumerate(era_mask_2024):
584-
for p_n,pd in enumerate(pds_2024):
585-
586-
# JetMET1 PD is used to run the TeVJet skims
587-
# we don't really need it here
588-
# (also as is the numbering conflicts with
589-
# the scouting wf below, so if we really want to
590-
# extend the pds for standar relvals for 2024 data
591-
# one needs to change the 145.415 below)
592-
if pd == 'JetMET1':
593-
continue
594-
595-
wf_number = round(base_wf + offset_era * e_n + offset_pd * p_n,3)
596-
dataset = '/' + pd + '/' + era + '-v1/RAW'
597-
598-
## ZeroBias have their own HARVESTING
599-
suff = 'ZB_' if 'ZeroBias' in step_name else ''
600-
601-
# Running C,D,E with the offline GT.
602-
# Could be removed once 2025 wfs are in and we'll test the online GT with them
603-
recosetup = 'RECONANORUN3_' + suff + 'reHLT_2024'
604-
recosetup = recosetup if era[-1] > 'E' else recosetup + '_Offline'
605-
606-
step_name = 'Run' + pd.replace('ParkingDouble','Park2') + era.split('Run')[1]
607-
workflows[wf_number] = ['',[step_name,'HLTDR3_2024',recosetup,'HARVESTRUN3_' + suff + '2024']]
608-
609578
## special HLT scouting workflow (with hardcoded private input file from ScoutingPFMonitor skimmed to remove all events without scouting)
610579
workflows[145.415] = ['',['HLTDR3_ScoutingPFMonitor_2024','RECONANORUN3_ScoutingPFMonitor_reHLT_2024','HARVESTRUN3_ScoutingPFMonitor_2024']]
611580

581+
######################################################################################################################################
582+
######################################################################################################################################
583+
584+
## Run3 Fixed Events for Testing and IBs
## (with 1k events in input, cut to 100 at step2)

# tiny fractional offset added to each workflow number so these testing
# workflows never collide with any other workflow numbering
fixed_events_offset = 1e-7 # to have it unique
588+
589+
def addFixedEventsTestingWfs(years, pds, eras):
    """Book one small fixed-events testing workflow per (era, PD) pair.

    ``eras`` and ``pds`` are paired one-to-one via zip: the n-th PD runs on
    the n-th era. Each booked workflow uses the 10k-event input step and is
    added to the module-level ``workflows`` matrix with a number derived
    from the year, the PD position and ``fixed_events_offset``.
    """
    for y in years:
        # enumerate instead of pds.index(pd): O(1) per iteration and still
        # correct if the same PD were ever listed twice
        for p_n, (era, pd) in enumerate(zip(eras, pds)):

            ## ZeroBias have their own HARVESTING
            # NOTE(review): unlike HARVESTING, the RECO step below does not
            # get the ZB_ suffix -- confirm this asymmetry is intended
            suff = 'ZB_' if 'ZeroBias' in pd else ''

            wf_number = round(float(y) + offset_pd * p_n + fixed_events_offset, 7)
            # Park2 shortening keeps step names within the submission length limit
            step_name = 'Run' + pd.replace('ParkingDouble','Park2') + y + era + "_10k"

            workflows[wf_number] = ['',[step_name,'HLTDR3_' + y,'RECONANORUN3_reHLT_' + y,'HARVESTRUN3_' + suff + y]]
601+
602+
## 2025 -- one representative PD per era (B..D)
pds = ['ZeroBias', 'JetMET0', 'EGamma0']
eras = ['B','C','D']
addFixedEventsTestingWfs(['2025'], pds, eras)
## 2024 -- one representative PD per era (B..I)
pds = ['ZeroBias', 'JetMET0', 'EGamma0', 'DisplacedJet', 'ParkingDoubleMuonLowMass0', 'BTagMu', 'Muon0', 'Tau']
eras = ['B','C','D','E','F','G','H','I']
addFixedEventsTestingWfs(['2024'], pds, eras)

## 2023 -- one representative PD per era (B..D)
pds = ['ZeroBias', 'EGamma0', 'JetMET0']
eras = ['B','C','D']
addFixedEventsTestingWfs(['2023'], pds, eras)

## 2022 -- one representative PD per era (B..E)
pds = ['ZeroBias', 'JetHT', 'Tau', 'BTagMu']
eras = ['B','C','D','E']
addFixedEventsTestingWfs(['2022'], pds, eras)
620+
621+
######################################################################################################################################
622+
######################################################################################################################################
623+
612624
##################################################################
613625
### run3 (2024) skims - Era F ###
614626
workflows[146.101] = ['',['RunZeroBias2024F','HLTDR3_2024','SKIMZEROBIASRUN3_reHLT_2024','HARVESTRUN3_ZB_2024']]

0 commit comments

Comments
 (0)