pipeline_config_id: runner_config

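# How one of the experiments below is typically run (a minimal sketch; it assumes the usual
# EasyTPP entry points `Config.build_from_yaml_file` and `Runner.build_from_config`, and the
# path to this file is a placeholder, so check your installed version and local layout):
#
#   from easy_tpp.config_factory import Config
#   from easy_tpp.runner import Runner
#
#   # `experiment_id` picks one top-level section below, e.g. NHP_train or THP_gen;
#   # its `dataset_id` must refer to a key defined under `data`.
#   config = Config.build_from_yaml_file('./experiment_config.yaml', experiment_id='NHP_train')
#   runner = Runner.build_from_config(config)
#   runner.run()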
data:
  retweet:
    data_format: json
    train_dir: easytpp/retweet # dataset id hosted on the HuggingFace Hub
    valid_dir: easytpp/retweet
    test_dir: easytpp/retweet
    data_specs:
      num_event_types: 3 # event types are labeled 0, 1, 2
      pad_token_id: 3 # padding id, set to num_event_types by convention
      padding_side: right
      truncation_side: right

NHP_train:
  base_config:
    stage: train
    backend: torch
    dataset_id: retweet
    runner_id: std_tpp
    model_id: NHP # model name
    base_dir: './checkpoints/'
  trainer_config:
    batch_size: 256
    max_epoch: 20
    shuffle: False
    optimizer: adam
    learning_rate: 1.e-3
    valid_freq: 1 # run validation every epoch
    use_tfb: False # whether to log to TensorBoard
    metrics: [ 'acc', 'rmse' ]
    seed: 2019
    gpu: -1 # -1 runs on CPU
  model_config:
    hidden_size: 64
    loss_integral_num_sample_per_step: 20 # Monte Carlo samples per step for the non-event integral of the log-likelihood
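    # The `thinning` block below configures the thinning (rejection) sampler used to draw
    # event times at evaluation and generation time; the same block, with the same meaning,
    # recurs in every experiment in this file. `num_step_gen` appears to control how many
    # events are generated per sequence: 1 here (next-event prediction) versus 10 in the
    # *_gen sections.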
    thinning:
      num_seq: 10
      num_sample: 1
      num_exp: 500 # number of i.i.d. Exp(intensity_bound) draws at one time in thinning algorithm
      look_ahead_time: 10
      patience_counter: 5 # the maximum iteration used in adaptive thinning
      over_sample_rate: 5
      num_samples_boundary: 5
      dtime_max: 5
      num_step_gen: 1

SAHP_train:
  base_config:
    stage: train
    backend: torch
    dataset_id: taxi
    runner_id: std_tpp
    model_id: SAHP # model name
    base_dir: './checkpoints/'
  trainer_config:
    batch_size: 256
    max_epoch: 20
    shuffle: False
    optimizer: adam
    learning_rate: 1.e-3
    valid_freq: 1
    use_tfb: False
    metrics: [ 'acc', 'rmse' ]
    seed: 2019
    gpu: 0
  model_config:
    hidden_size: 32
    time_emb_size: 16
    num_layers: 2
    num_heads: 2
    loss_integral_num_sample_per_step: 20
    use_ln: False
    thinning:
      num_seq: 10
      num_sample: 1
      num_exp: 500 # number of i.i.d. Exp(intensity_bound) draws at one time in thinning algorithm
      look_ahead_time: 10
      patience_counter: 5 # the maximum iteration used in adaptive thinning
      over_sample_rate: 5
      num_samples_boundary: 5
      dtime_max: 5
      num_step_gen: 1

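# The *_gen experiments below run `stage: gen` (sequence generation via the thinning sampler,
# hence `num_step_gen: 10`) instead of training. As the commented-out `pretrained_model_dir`
# entries in THP_gen and AttNHP_gen suggest, a generation run is normally pointed at a
# previously trained checkpoint; the exact key and path depend on your EasyTPP version and
# local checkpoint directory.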
SAHP_gen:
  base_config:
    stage: gen
    backend: torch
    dataset_id: retweet
    runner_id: std_tpp
    model_id: SAHP # model name
    base_dir: './checkpoints/'
  trainer_config:
    batch_size: 256
    max_epoch: 1
  model_config:
    hidden_size: 16
    time_emb_size: 4
    num_layers: 2
    num_heads: 2
    loss_integral_num_sample_per_step: 20
    use_ln: False
    thinning:
      num_seq: 10
      num_sample: 1
      num_exp: 500 # number of i.i.d. Exp(intensity_bound) draws at one time in thinning algorithm
      look_ahead_time: 10
      patience_counter: 5 # the maximum iteration used in adaptive thinning
      over_sample_rate: 5
      num_samples_boundary: 5
      dtime_max: 5
      num_step_gen: 10

THP_train:
  base_config:
    stage: train
    backend: torch
    dataset_id: taxi
    runner_id: std_tpp
    model_id: THP # model name
    base_dir: './checkpoints/'
  trainer_config:
    batch_size: 256
    max_epoch: 30
    shuffle: False
    optimizer: adam
    learning_rate: 1.e-3
    valid_freq: 1
    use_tfb: False
    metrics: [ 'acc', 'rmse' ]
    seed: 2019
    gpu: -1
  model_config:
    hidden_size: 32
    time_emb_size: 16
    num_layers: 2
    num_heads: 2
    mc_num_sample_per_step: 20
    loss_integral_num_sample_per_step: 20
    use_ln: False
    thinning:
      num_seq: 10
      num_sample: 1
      num_exp: 500 # number of i.i.d. Exp(intensity_bound) draws at one time in thinning algorithm
      look_ahead_time: 10
      patience_counter: 5 # the maximum iteration used in adaptive thinning
      over_sample_rate: 5
      num_samples_boundary: 5
      dtime_max: 5
      num_step_gen: 1

THP_gen:
  base_config:
    stage: gen
    backend: torch
    dataset_id: retweet
    runner_id: std_tpp
    model_id: THP # model name
    base_dir: './checkpoints/'
  trainer_config:
    batch_size: 256
    max_epoch: 1
  model_config:
    hidden_size: 32
    time_emb_size: 16
    num_layers: 2
    num_heads: 2
    mc_num_sample_per_step: 20
    loss_integral_num_sample_per_step: 20
    use_ln: False
    # pretrained_model_dir: ./checkpoints/2694_4384867712_230603-160544/models/saved_model
    thinning:
      num_seq: 10
      num_sample: 1
      num_exp: 500 # number of i.i.d. Exp(intensity_bound) draws at one time in thinning algorithm
      look_ahead_time: 10
      patience_counter: 5 # the maximum iteration used in adaptive thinning
      over_sample_rate: 5
      num_samples_boundary: 5
      dtime_max: 5
      num_step_gen: 10

AttNHP_train:
  base_config:
    stage: train
    backend: torch
    dataset_id: taxi
    runner_id: std_tpp
    model_id: AttNHP # model name
    base_dir: './checkpoints/'
  trainer_config:
    batch_size: 256
    max_epoch: 200
    shuffle: False
    optimizer: adam
    learning_rate: 1.e-3
    valid_freq: 1
    use_tfb: False
    metrics: [ 'acc', 'rmse' ]
    seed: 2019
    gpu: -1
  model_config:
    hidden_size: 16
    time_emb_size: 4
    num_layers: 2
    num_heads: 2
    loss_integral_num_sample_per_step: 10
    use_ln: False
    thinning:
      num_seq: 2
      num_sample: 1
      num_exp: 50 # number of i.i.d. Exp(intensity_bound) draws at one time in thinning algorithm
      look_ahead_time: 10
      patience_counter: 5 # the maximum iteration used in adaptive thinning
      over_sample_rate: 5
      num_samples_boundary: 5
      dtime_max: 5
      num_step_gen: 1

AttNHP_gen:
  base_config:
    stage: gen
    backend: torch
    dataset_id: retweet
    runner_id: std_tpp
    model_id: AttNHP # model name
    base_dir: './checkpoints/'
  trainer_config:
    batch_size: 256
    max_epoch: 1
  model_config:
    hidden_size: 16
    time_emb_size: 4
    num_layers: 2
    num_heads: 2
    mc_num_sample_per_step: 20
    loss_integral_num_sample_per_step: 20
    use_ln: False
    # pretrained_model_dir: ./checkpoints/6934_4375315840_230603-222826/models/saved_model
    thinning:
      num_seq: 10
      num_sample: 1
      num_exp: 50 # number of i.i.d. Exp(intensity_bound) draws at one time in thinning algorithm
      look_ahead_time: 10
      patience_counter: 5 # the maximum iteration used in adaptive thinning
      over_sample_rate: 5
      num_samples_boundary: 5
      dtime_max: 5
      num_step_gen: 10