@@ -268,23 +268,26 @@ def daily_extend(
     excluded_sample_dir=None,
 ):
     start_day = last_date(base_ts)
-
-    reconsidered_samples = collections.deque()
     earliest_date = start_day - datetime.timedelta(days=1)
-    if base_ts is not None:
-        next_day = start_day + datetime.timedelta(days=1)
-        reconsidered_samples.extend(
-            fetch_samples_from_pickle_file(
-                date=next_day,
-                num_past_days=num_past_days,
-                in_dir=excluded_sample_dir,
-            )
-        )
-        earliest_date = next_day - datetime.timedelta(days=num_past_days)
+
+    # if num_past_days is None:
+    #     num_past_days = 0
+
+    # reconsidered_samples = collections.deque()
+    # if base_ts is not None:
+    #     next_day = start_day + datetime.timedelta(days=1)
+    #     reconsidered_samples.extend(
+    #         fetch_samples_from_pickle_file(
+    #             date=next_day,
+    #             num_past_days=num_past_days,
+    #             in_dir=excluded_sample_dir,
+    #         )
+    #     )
+    #     earliest_date = next_day - datetime.timedelta(days=num_past_days)
 
     last_ts = base_ts
     for date in metadata_db.get_days(start_day):
-        ts, excluded_samples, added_back_samples = extend(
+        ts = extend(
             alignment_store=alignment_store,
             metadata_db=metadata_db,
             date=date,
@@ -298,29 +301,29 @@ def daily_extend(
             num_threads=num_threads,
             precision=precision,
             rng=rng,
-            reconsidered_samples=reconsidered_samples,
+            # reconsidered_samples=reconsidered_samples,
         )
-        yield ts, excluded_samples, date
+        yield ts, date
 
         # Update list of reconsidered samples.
         # Remove oldest reconsidered samples.
-        if len(reconsidered_samples) > 0:
-            while reconsidered_samples[0].date == earliest_date:
-                reconsidered_samples.popleft()
-        # Remove samples just added back.
-        if len(added_back_samples) > 0:
-            # TODO: Horrible. This needs to be reworked after
-            # storing pickled Samples in a SQLite db.
-            samples_to_remove = []
-            for sample_added_back in added_back_samples:
-                for sample_reconsidered in reconsidered_samples:
-                    if sample_added_back.strain == sample_reconsidered.strain:
-                        samples_to_remove.append(sample_added_back)
-                        continue
-            for sample in samples_to_remove:
-                reconsidered_samples.remove(sample)
-        # Add new excluded samples.
-        reconsidered_samples.extend(excluded_samples)
+        # if len(reconsidered_samples) > 0:
+        #     while reconsidered_samples[0].date == earliest_date:
+        #         reconsidered_samples.popleft()
+        # # Remove samples just added back.
+        # if len(added_back_samples) > 0:
+        #     # TODO: Horrible. This needs to be reworked after
+        #     # storing pickled Samples in a SQLite db.
+        #     samples_to_remove = []
+        #     for sample_added_back in added_back_samples:
+        #         for sample_reconsidered in reconsidered_samples:
+        #             if sample_added_back.strain == sample_reconsidered.strain:
+        #                 samples_to_remove.append(sample_added_back)
+        #                 continue
+        #     for sample in samples_to_remove:
+        #         reconsidered_samples.remove(sample)
+        # # Add new excluded samples.
+        # reconsidered_samples.extend(excluded_samples)
 
         earliest_date += datetime.timedelta(days=1)
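The block commented out above maintained a date-keyed sliding window of excluded samples in a collections.deque, with a quadratic strain-matching loop that the TODO marks for rework. A self-contained sketch of that window pattern follows; the Sample class and the advance_window name are illustrative, not part of this module:

    import collections
    import dataclasses
    import datetime

    @dataclasses.dataclass
    class Sample:
        strain: str
        date: datetime.date

    def advance_window(window, earliest_date, excluded_samples, added_back_samples):
        # Drop samples that have aged out of the look-back window.
        while window and window[0].date == earliest_date:
            window.popleft()
        # Drop samples just added back, matched by strain; rebuilding the
        # deque avoids the O(n^2) remove() loop flagged in the TODO.
        added_back = {s.strain for s in added_back_samples}
        window = collections.deque(s for s in window if s.strain not in added_back)
        # Newly excluded samples become candidates for later days.
        window.extend(excluded_samples)
        return window

    day = datetime.date(2021, 1, 2)
    window = advance_window(
        collections.deque(),
        earliest_date=day - datetime.timedelta(days=1),
        excluded_samples=[Sample(strain="strain_a", date=day)],
        added_back_samples=[],
    )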
@@ -428,7 +431,7 @@ def extend(
     )
     ts = increment_time(date, base_ts)
 
-    ts, excluded_samples, _ = add_matching_results(
+    ts = add_matching_results(
         samples=samples,
         ts=ts,
         date=date,
@@ -438,17 +441,17 @@ def extend(
         show_progress=show_progress,
     )
 
-    ts, _, added_back_samples = add_matching_results(
-        samples=reconsidered_samples,
-        ts=ts,
-        date=date,
-        num_mismatches=num_mismatches,
-        max_hmm_cost=None,
-        min_group_size=min_group_size,
-        show_progress=show_progress,
-    )
+    # ts, _, added_back_samples = add_matching_results(
+    #     samples=reconsidered_samples,
+    #     ts=ts,
+    #     date=date,
+    #     num_mismatches=num_mismatches,
+    #     max_hmm_cost=None,
+    #     min_group_size=min_group_size,
+    #     show_progress=show_progress,
+    # )
 
-    return ts, excluded_samples, added_back_samples
+    return ts  # , excluded_samples, added_back_samples
 
 
 def match_path_ts(samples, ts, path, reversions):
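With both three-tuple returns disabled, extend hands back only the tree sequence, and daily_extend above now yields (ts, date) pairs. A toy illustration of the changed generator contract, with a stub standing in for daily_extend since its real arguments depend on the surrounding pipeline:

    import datetime

    def stub_daily_extend():
        # Yields (ts, date) pairs, as daily_extend does after this change,
        # instead of the old (ts, excluded_samples, date) triples.
        start = datetime.date(2021, 6, 1)
        for offset in range(3):
            yield f"ts-{offset}", start + datetime.timedelta(days=offset)

    for ts, date in stub_daily_extend():
        print(date, ts)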
@@ -611,7 +614,7 @@ def add_matching_results(
     # print(ts.draw_text())
     ts = coalesce_mutations(ts, attach_nodes)
 
-    return ts # , excluded_samples, added_samples
+    return ts  # , excluded_samples, added_samples
 
 
 def fetch_samples_from_pickle_file(date, num_past_days=None, in_dir=None):
@@ -624,15 +627,15 @@ def fetch_samples_from_pickle_file(date, num_past_days=None, in_dir=None):
     for i in range(num_past_days, 0, -1):
         past_date = date - datetime.timedelta(days=i)
         pickle_file = in_dir + "/"
-        pickle_file += past_date.strftime('%Y-%m-%d') + file_suffix
+        pickle_file += past_date.strftime("%Y-%m-%d") + file_suffix
         if os.path.exists(pickle_file):
             samples += parse_pickle_file(pickle_file)
     return samples
 
 
 def parse_pickle_file(pickle_file):
     """Return a list of Sample objects."""
-    with open(pickle_file, 'rb') as f:
+    with open(pickle_file, "rb") as f:
         samples = pickle.load(f)
     return samples
 
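These helpers assume one pickle file per day, named by ISO date plus the module-level file_suffix. A round-trip sketch under that assumption; the writer function and the ".pickle" suffix are illustrative, not defined here:

    import datetime
    import os
    import pickle

    FILE_SUFFIX = ".pickle"  # assumed; must match the module's file_suffix

    def dump_samples_to_pickle_file(samples, date, out_dir):
        # Writer counterpart to parse_pickle_file: one date-stamped file per day.
        path = os.path.join(out_dir, date.strftime("%Y-%m-%d") + FILE_SUFFIX)
        with open(path, "wb") as f:
            pickle.dump(samples, f)

    today = datetime.date(2021, 6, 8)
    dump_samples_to_pickle_file([], today - datetime.timedelta(days=1), "/tmp")
    # Then: fetch_samples_from_pickle_file(today, num_past_days=7, in_dir="/tmp")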
@@ -714,7 +717,6 @@ def node_mutation_descriptors(ts, u):
 
 
 def update_tables(tables, edges_to_delete, mutations_to_delete):
-
     # Updating the mutations is a real faff, and the only way I
     # could get it to work is by setting the time values. This should
     # be easier...
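The faff that comment describes: after rows are deleted, mutation parent pointers are stale, and inconsistent mutation times make tables.sort() fail, hence "setting the time values". A minimal sketch of the delete-and-rebuild pattern using the public tskit table API, assuming edges_to_delete and mutations_to_delete are sets of row ids; an illustration, not this function's actual body:

    import numpy as np
    import tskit

    def update_tables_sketch(tables, edges_to_delete, mutations_to_delete):
        # Rebuild the edge table without the deleted rows.
        edges = tables.edges.copy()
        tables.edges.clear()
        for j, row in enumerate(edges):
            if j not in edges_to_delete:
                tables.edges.append(row)
        # Same for mutations; clear stale parent pointers as we go.
        mutations = tables.mutations.copy()
        tables.mutations.clear()
        for j, row in enumerate(mutations):
            if j not in mutations_to_delete:
                tables.mutations.append(row.replace(parent=tskit.NULL))
        # Unknown times sidestep the time-ordering checks in sort().
        tables.mutations.time = np.full(tables.mutations.num_rows, tskit.UNKNOWN_TIME)
        tables.sort()
        tables.build_index()
        tables.compute_mutation_parents()
        return tables.tree_sequence()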