Skip to content

Commit d75f1b9

Browse files
committed
Restore n3fit files from master
1 parent 333e597 commit d75f1b9

File tree

4 files changed

+9
-193
lines changed

4 files changed

+9
-193
lines changed

n3fit/src/n3fit/layers/DIS.py

Lines changed: 3 additions & 129 deletions
Original file line numberDiff line numberDiff line change
@@ -23,7 +23,6 @@
2323
"""
2424

2525
import numpy as np
26-
from scipy import interpolate as scint
2726

2827
from n3fit.backends import operations as op
2928

@@ -40,122 +39,6 @@ class DIS(Observable):
4039
while the input pdf is rank 4 of shape (batch_size, replicas, xgrid, flavours)
4140
"""
4241

43-
def __init__(self, fktable_data,
44-
fktable_arr,
45-
dataset_name,
46-
boundary_condition=None,
47-
operation_name="NULL",
48-
nfl=14,
49-
n_replicas=1,
50-
power_corrections=False,
51-
ht_type=None,
52-
exp_kinematics=None,
53-
**kwargs):
54-
super().__init__(fktable_data, fktable_arr, dataset_name, boundary_condition, operation_name, nfl, n_replicas, **kwargs)
55-
56-
self.compute_power_corrections = power_corrections
57-
self.power_corrections = None
58-
59-
# NOTE
60-
# Ratio of SFs are not implemented yet. Work in progress.
61-
if self.compute_power_corrections and exp_kinematics is not None:
62-
self.exp_kinematics = exp_kinematics
63-
if ht_type is None:
64-
self.ht_type = 'ABMP'
65-
raise NotImplementedError("This part should be reimplemented.")
66-
else:
67-
self.ht_type = ht_type
68-
69-
if self.ht_type == 'ABMP':
70-
self.power_corrections = self.compute_abmp_parametrisation()
71-
elif self.ht_type == 'custom':
72-
self.power_corrections = self.compute_custom_parametrisation()
73-
else:
74-
raise Exception(f"HT type {ht_type} is not implemented.")
75-
76-
77-
def compute_abmp_parametrisation(self):
78-
"""
79-
This function is very similar to `compute_ht_parametrisation` in
80-
validphys.theorycovariance.construction.py. However, the latter
81-
accounts for shifts in the 5pt prescription. As of now, this function
82-
is meant to work only for DIS NC data, using the ABMP16 result.
83-
"""
84-
x_knots = [0.0, 0.1, 0.3, 0.5, 0.7, 0.9, 1]
85-
y_h2 = [0.023, -0.032, -0.005, 0.025, 0.051, 0.003, 0.0]
86-
y_ht = [-0.319, -0.134, -0.052, 0.071, 0.030, 0.003, 0.0]
87-
#h2_sigma = [0.019, 0.013, 0.009, 0.006, 0.005, 0.004]
88-
#ht_sigma = [0.126, 0.040, 0.030, 0.025, 0.012, 0.007]
89-
H_2 = scint.CubicSpline(x_knots, y_h2)
90-
H_T = scint.CubicSpline(x_knots, y_ht)
91-
92-
# Reconstruct HL from HT and H2
93-
def H_L(x):
94-
return (H_2(x) - np.power(x, 0.05) * H_T(x))
95-
96-
H_2 = np.vectorize(H_2)
97-
H_L = np.vectorize(H_L)
98-
99-
x = self.exp_kinematics['kin1']
100-
y = self.exp_kinematics['kin3']
101-
Q2 = self.exp_kinematics['kin2']
102-
N2, NL = 1#compute_normalisation_by_experiment(self.dataname, x, y, Q2)
103-
104-
PC_2 = N2 * H_2(x) / Q2
105-
PC_L = NL * H_L(x) / Q2
106-
power_correction = PC_2 + PC_L
107-
power_correction = power_correction.to_numpy()
108-
109-
return power_correction
110-
111-
112-
def compute_custom_parametrisation(self):
113-
"""
114-
This function is very similar to `compute_ht_parametrisation` in
115-
validphys.theorycovariance.construction.py. However, the latter
116-
accounts for shifts in the 5pt prescription. As of now, this function
117-
is meant to work only for DIS NC data, using the ABMP16 result.
118-
"""
119-
# Posteriors from 240812-01-ABMP-large-prior-7k
120-
x_knots = [0.0, 0.1, 0.3, 0.5, 0.7, 0.9, 1]
121-
y_h2_p = [-0.00441, 0.11169, -0.01632, 0.00000, -0.08742, -0.07279, 0.00000]
122-
y_hl_p = [0.00000, -0.06241, -0.08655, -0.03306, 0.00000, -0.05987, 0.0000]
123-
y_h2_d = [-0.04117, 0.00000, 0.03124, -0.01059, 0.04763, 0.00000, 0.00000]
124-
y_hl_d = [0.00316, 0.00469, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000]
125-
126-
H_2p = scint.CubicSpline(x_knots, y_h2_p)
127-
H_lp = scint.CubicSpline(x_knots, y_hl_p)
128-
H_2d = scint.CubicSpline(x_knots, y_h2_d)
129-
H_ld = scint.CubicSpline(x_knots, y_hl_d)
130-
131-
H_2p = np.vectorize(H_2p)
132-
H_lp = np.vectorize(H_lp)
133-
H_2d = np.vectorize(H_2d)
134-
H_ld = np.vectorize(H_ld)
135-
136-
x = self.exp_kinematics['kin1']
137-
y = self.exp_kinematics['kin3']
138-
Q2 = self.exp_kinematics['kin2']
139-
N2, NL = compute_normalisation_by_experiment(self.dataname, x, y, Q2)
140-
141-
if "_P_" in self.dataname or "HERA" in self.dataname:
142-
PC_2 = N2 * H_2p(x) / Q2
143-
PC_L = NL * H_lp(x) / Q2
144-
elif "_D_" in self.dataname:
145-
PC_2 = N2 * H_2d(x) / Q2
146-
PC_L = NL * H_ld(x) / Q2
147-
else:
148-
# TODO
149-
# Need to implement this
150-
PC_2 = 0 / Q2 #N2 * H_2d(x) / Q2
151-
PC_L = 0 / Q2 #NL * H_ld(x) / Q2
152-
153-
power_correction = PC_2 + PC_L
154-
power_correction = power_correction.to_numpy()
155-
156-
return power_correction
157-
158-
15942
def gen_mask(self, basis):
16043
"""
16144
Receives a list of active flavours and generates a boolean mask tensor
@@ -202,11 +85,7 @@ def build(self, input_shape):
20285
if self.num_replicas > 1:
20386
self.compute_observable = compute_dis_observable_many_replica
20487
else:
205-
# Currying the function so that the `Observable` does not need
206-
# to get modified
207-
def compute_dis_observable_one_replica_w_pc(pdf, padded_fk):
208-
return compute_dis_observable_one_replica(pdf, padded_fk, power_corrections = self.power_corrections)
209-
self.compute_observable = compute_dis_observable_one_replica_w_pc
88+
self.compute_observable = compute_dis_observable_one_replica
21089

21190

21291
def compute_dis_observable_many_replica(pdf, padded_fk):
@@ -228,14 +107,9 @@ def compute_dis_observable_many_replica(pdf, padded_fk):
228107
return op.einsum('brxf, nxf -> brn', pdf[0], padded_fk)
229108

230109

231-
def compute_dis_observable_one_replica(pdf, padded_fk, power_corrections = None):
110+
def compute_dis_observable_one_replica(pdf, padded_fk):
232111
"""
233112
Same operations as above but a specialized implementation that is more efficient for 1 replica,
234113
masking the PDF rather than the fk table.
235114
"""
236-
if power_corrections is None:
237-
238-
return op.tensor_product(pdf[0], padded_fk, axes=[(2, 3), (1, 2)])
239-
else:
240-
241-
return op.tensor_product(pdf[0], padded_fk, axes=[(2, 3), (1, 2)]) + power_corrections
115+
return op.tensor_product(pdf[0], padded_fk, axes=[(2, 3), (1, 2)])

n3fit/src/n3fit/model_gen.py

Lines changed: 2 additions & 44 deletions
Original file line numberDiff line numberDiff line change
@@ -138,9 +138,6 @@ def observable_generator(
138138
positivity_initial=1.0,
139139
integrability=False,
140140
n_replicas=1,
141-
exp_data=None,
142-
power_corrections=None,
143-
ht_type=None
144141
): # pylint: disable=too-many-locals
145142
"""
146143
This function generates the observable models for each experiment.
@@ -182,10 +179,6 @@ def observable_generator(
182179
set the positivity lagrange multiplier for epoch 1
183180
integrability: bool
184181
switch on/off the integrability constraints
185-
power_corrections: bool
186-
whether to include HT in theory predictions
187-
ht_type: str
188-
type of HT parametrisation
189182
190183
Returns
191184
------
@@ -201,37 +194,16 @@ def observable_generator(
201194
dataset_xsizes = []
202195
model_inputs = []
203196
model_observables = []
204-
kin_by_dict = {}
205-
206-
if exp_data is not None:
207-
included_processes = [
208-
'DEUTERON',
209-
'NMC',
210-
'NUCLEAR',
211-
#'HERACOMB',
212-
]
213-
for process in exp_data:
214-
commondata = process.load_commondata()
215-
for dataset in commondata:
216-
if process.name in included_processes and "_NC_" in dataset.setname:
217-
kin_by_dict[dataset.setname] = dataset.kinematics
218-
else:
219-
kin_by_dict[dataset.setname] = None
220-
221197
# The first step is to compute the observable for each of the datasets
222198
for dataset in spec_dict["datasets"]:
223199
# Get the generic information of the dataset
224200
dataset_name = dataset.name
225-
kinematics = None
226201

227202
# Look at what kind of layer do we need for this dataset
228203
if dataset.hadronic:
229204
Obs_Layer = DY
230205
else:
231206
Obs_Layer = DIS
232-
if power_corrections:
233-
if exp_data is not None and kin_by_dict[dataset_name] is not None:
234-
kinematics = kin_by_dict[dataset_name]
235207

236208
# Set the operation (if any) to be applied to the fktables of this dataset
237209
operation_name = dataset.operation
@@ -242,29 +214,15 @@ def observable_generator(
242214
# list of validphys.coredata.FKTableData objects
243215
# these will then be used to check how many different pdf inputs are needed
244216
# (and convolutions if given the case)
245-
if dataset.hadronic:
246-
obs_layer = Obs_Layer(
217+
obs_layer = Obs_Layer(
247218
dataset.fktables_data,
248219
dataset.fktables(),
249220
dataset_name,
250221
boundary_condition,
251222
operation_name,
252223
n_replicas=n_replicas,
253224
name=f"dat_{dataset_name}",
254-
)
255-
else:
256-
obs_layer = Obs_Layer(
257-
dataset.fktables_data,
258-
dataset.fktables(),
259-
dataset_name,
260-
boundary_condition,
261-
operation_name,
262-
n_replicas=n_replicas,
263-
name=f"dat_{dataset_name}",
264-
power_corrections=power_corrections,
265-
exp_kinematics=kinematics if power_corrections else None,
266-
ht_type=None if not power_corrections else ht_type
267-
)
225+
)
268226

269227
# If the observable layer found that all input grids are equal, the splitting will be None
270228
# otherwise the different xgrids need to be stored separately

n3fit/src/n3fit/model_trainer.py

Lines changed: 4 additions & 12 deletions
Original file line numberDiff line numberDiff line change
@@ -109,8 +109,6 @@ def __init__(
109109
theoryid=None,
110110
lux_params=None,
111111
replicas=None,
112-
power_corrections=False,
113-
ht_type=None
114112
):
115113
"""
116114
Parameters
@@ -145,10 +143,6 @@ def __init__(
145143
dictionary containing the params needed from LuxQED
146144
replicas: list
147145
list with the replicas ids to be fitted
148-
power_corrections: bool
149-
whether to include HT in theory predictions
150-
ht_type: str
151-
type of HT parametrisation
152146
"""
153147
# Save all input information
154148
self.exp_info = list(exp_info)
@@ -166,8 +160,6 @@ def __init__(
166160
self.lux_params = lux_params
167161
self.replicas = replicas
168162
self.experiments_data = experiments_data
169-
self.power_corrections = power_corrections
170-
self.ht_type = ht_type
171163

172164
# Initialise internal variables which define behaviour
173165
if debug:
@@ -570,9 +562,6 @@ def _generate_observables(
570562
invcovmat_tr=experiment_data["invcovmat"][i],
571563
invcovmat_vl=experiment_data["invcovmat_vl"][i],
572564
n_replicas=len(self.replicas),
573-
exp_data=self.experiments_data,
574-
power_corrections=self.power_corrections,
575-
ht_type=self.ht_type
576565
)
577566

578567
# Save the input(s) corresponding to this experiment
@@ -947,7 +936,10 @@ def hyperparametrizable(self, params):
947936
)
948937

949938
if photons:
950-
pdf_model.get_layer("add_photon").register_photon(xinput.input.tensor_content)
939+
if self._scaler: # select only the non-scaled input
940+
pdf_model.get_layer("add_photon").register_photon(xinput.input.tensor_content[:,:,1:])
941+
else:
942+
pdf_model.get_layer("add_photon").register_photon(xinput.input.tensor_content)
951943

952944
# Model generation joins all the different observable layers
953945
# together with pdf model generated above

n3fit/src/n3fit/performfit.py

Lines changed: 0 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -47,8 +47,6 @@ def performfit(
4747
maxcores=None,
4848
double_precision=False,
4949
parallel_models=False,
50-
power_corrections=False,
51-
ht_type=None
5250
):
5351
"""
5452
This action will (upon having read a validcard) process a full PDF fit
@@ -135,10 +133,6 @@ def performfit(
135133
whether to use double precision
136134
parallel_models: bool
137135
whether to run models in parallel
138-
power_corrections: bool
139-
whether to include HT in theory predictions
140-
ht_type: str
141-
Type of HT parametrisation
142136
"""
143137
from n3fit.backends import set_initial_state
144138

@@ -208,8 +202,6 @@ def performfit(
208202
theoryid=theoryid,
209203
lux_params=fiatlux,
210204
replicas=replica_idxs,
211-
power_corrections=power_corrections,
212-
ht_type=ht_type
213205
)
214206

215207
# This is just to give a descriptive name to the fit function

0 commit comments

Comments (0)