-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathPowerballv2.py
More file actions
1268 lines (1089 loc) · 45.5 KB
/
Powerballv2.py
File metadata and controls
1268 lines (1089 loc) · 45.5 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
988
989
990
991
992
993
994
995
996
997
998
999
1000
# Powerball.py - OPTIMIZED VERSION
# Enhanced statistical analysis with improved accuracy and additional metrics
import io
import math
import base64
import numpy as np
import pandas as pd
import requests
import streamlit as st
import matplotlib.pyplot as plt
from datetime import datetime, timedelta
from io import StringIO
from scipy import stats
# Import seed analysis module
try:
from Seeds import (
render_seed_analysis,
assign_virtual_seeds_to_history,
detect_seed_patterns,
predict_next_seed_from_patterns,
backtest_seed_methods,
)
SEEDS_AVAILABLE = True
except ImportError:
SEEDS_AVAILABLE = False
WHITE_MAX = 69
MEGA_MAX = 26 # Powerball max
NY_OD_URL = "https://data.ny.gov/api/views/d6yy-54nr/rows.csv?accessType=DOWNLOAD"
st.set_page_config(page_title="Powerball Patterns", layout="wide")
# ---------- Utilities ----------
@st.cache_data(show_spinner=False)
def fetch_history_years(years: int) -> pd.DataFrame:
    """Download Powerball draw history from the NY Open Data portal.

    Downloads the full CSV, keeps only the most recent `years` years of
    draws, and normalizes them into columns: date, w1..w5 (white balls)
    and mega (the Powerball).  Result is sorted by date ascending and
    cached by Streamlit so reruns do not re-download.

    Raises requests.HTTPError if the download fails.
    """
    r = requests.get(NY_OD_URL, timeout=30)
    r.raise_for_status()
    raw = pd.read_csv(StringIO(r.text))
    # Case-insensitive column lookup so header-casing differences don't break parsing.
    raw_cols = {c.lower(): c for c in raw.columns}
    date_col = raw_cols.get("draw date", "Draw Date")
    raw[date_col] = pd.to_datetime(raw[date_col])
    raw = raw.sort_values(date_col)
    # 365.25 accounts for leap years over multi-year spans.
    cutoff = datetime.today() - timedelta(days=int(365.25 * years))
    raw = raw[raw[date_col] >= cutoff].copy()
    win_col = raw_cols.get("winning numbers", "Winning Numbers")
    # "Winning Numbers" is space-separated: five whites, then usually the Powerball.
    parts = raw[win_col].astype(str).str.split(" ", expand=True)
    whites = parts.iloc[:, :5].astype(int)
    whites.columns = ["w1", "w2", "w3", "w4", "w5"]
    if parts.shape[1] >= 6:
        # Powerball embedded as the sixth token of the winning-numbers string.
        red = parts.iloc[:, 5].astype(int)
    else:
        # Fallback: assume the Powerball lives in its own column.
        power_col = (
            raw_cols.get("powerball")
            or raw_cols.get("power ball")
            or raw_cols.get("pb")
            or "Powerball"
        )
        red = raw[power_col].astype(int)
    df = pd.DataFrame(
        {
            "date": raw[date_col],
            "w1": whites["w1"],
            "w2": whites["w2"],
            "w3": whites["w3"],
            "w4": whites["w4"],
            "w5": whites["w5"],
            "mega": red,
        }
    ).sort_values("date")
    return df.reset_index(drop=True)
def load_uploaded(csv_bytes: bytes) -> pd.DataFrame:
    """Parse a user-uploaded draw-history CSV into the canonical frame.

    The CSV must contain (case-insensitively) the columns
    date, w1, w2, w3, w4, w5, mega.  Returns a DataFrame with those
    lowercase column names, date parsed to datetime, number columns as
    int, sorted by date with a fresh 0..n-1 index.

    Raises ValueError when any required column is missing.
    """
    frame = pd.read_csv(io.BytesIO(csv_bytes))
    lower_to_actual = {name.lower(): name for name in frame.columns}
    required = ["date", "w1", "w2", "w3", "w4", "w5", "mega"]
    if any(col not in lower_to_actual for col in required):
        raise ValueError("CSV must include: date,w1,w2,w3,w4,w5,mega")
    frame = frame.rename(columns={lower_to_actual[col]: col for col in required})
    frame["date"] = pd.to_datetime(frame["date"])
    number_cols = ["w1", "w2", "w3", "w4", "w5", "mega"]
    frame[number_cols] = frame[number_cols].astype(int)
    return frame.sort_values("date").reset_index(drop=True)
def recency_weights(dates: pd.Series, half_life_days: float) -> np.ndarray:
    """Exponential-decay weights for a series of draw dates.

    The newest draw gets the largest weight; a draw `half_life_days` older
    gets half of it.  Weights are normalized to sum to 1.
    """
    newest = dates.max()
    days_old = (newest - dates).dt.days.astype(float)
    # Floor the half-life at one day so the decay rate stays finite.
    decay = math.log(2) / max(half_life_days, 1.0)
    raw = np.exp(-decay * days_old)
    return raw / raw.sum()
def make_freq_tables(df: pd.DataFrame, weights: np.ndarray | None = None):
    """Per-number probability tables for white balls and the Powerball.

    Each draw contributes its weight to every white ball it contains and to
    its Powerball.  With weights=None every draw counts equally.

    Returns (white_probs, mega_probs): 1-indexed arrays normalized to sum
    to 1 (index 0 is unused padding).
    """
    if weights is None:
        weights = np.ones(len(df)) / len(df)
    weights = np.asarray(weights, dtype=float)
    wfreq = np.zeros(WHITE_MAX + 1)
    mfreq = np.zeros(MEGA_MAX + 1)
    # Vectorized accumulation: np.add.at handles repeated indices correctly,
    # replacing the original per-row iterrows() loop (much faster, same sums).
    for col in ("w1", "w2", "w3", "w4", "w5"):
        np.add.at(wfreq, df[col].to_numpy(dtype=int), weights)
    np.add.at(mfreq, df["mega"].to_numpy(dtype=int), weights)
    return wfreq / wfreq.sum(), mfreq / mfreq.sum()
# NEW: Chi-square goodness of fit test for randomness
def test_randomness(df: pd.DataFrame):
    """Chi-square goodness-of-fit of observed number counts vs. uniform.

    Returns a dict with the chi-square statistic and p-value for the white
    balls and the Powerball.  High p-values (> 0.05) are consistent with a
    fair, uniformly random drawing process.
    """
    white_counts = np.zeros(WHITE_MAX + 1)
    mega_counts = np.zeros(MEGA_MAX + 1)
    # Vectorized tallies (replaces the per-row iterrows() loop).
    for col in ("w1", "w2", "w3", "w4", "w5"):
        np.add.at(white_counts, df[col].to_numpy(dtype=int), 1)
    np.add.at(mega_counts, df["mega"].to_numpy(dtype=int), 1)
    n_draws = len(df)
    # Under uniformity each white number is expected in n*5/69 tallies and
    # each Powerball in n/26.  Pass full-length arrays rather than a scalar
    # so every scipy version accepts f_exp.
    expected_white = np.full(WHITE_MAX, n_draws * 5 / WHITE_MAX)
    expected_mega = np.full(MEGA_MAX, n_draws / MEGA_MAX)
    chi2_white, p_white = stats.chisquare(white_counts[1:], f_exp=expected_white)
    chi2_mega, p_mega = stats.chisquare(mega_counts[1:], f_exp=expected_mega)
    return {
        "white_chi2": chi2_white,
        "white_pvalue": p_white,
        "mega_chi2": chi2_mega,
        "mega_pvalue": p_mega,
    }
# NEW: Moving average frequency (trend detection)
def moving_window_freq(df: pd.DataFrame, window_size: int = 50):
    """Entropy of number frequencies over a sliding window of draws.

    Slides a window of `window_size` consecutive draws across the history
    and records the Shannon entropy of the white and Powerball frequency
    tables in each window (trend detection).

    Returns a DataFrame with columns end_date, white_entropy, mega_entropy,
    or None when there are fewer draws than window_size.
    """
    n_windows = len(df) - window_size + 1
    if n_windows <= 0:
        return None
    records = []
    for start in range(n_windows):
        chunk = df.iloc[start : start + window_size]
        white_p, mega_p = make_freq_tables(chunk)
        records.append(
            {
                "end_date": chunk["date"].iloc[-1],
                "white_entropy": stats.entropy(white_p[1:]),
                "mega_entropy": stats.entropy(mega_p[1:]),
            }
        )
    return pd.DataFrame(records)
# NEW: Streak analysis
def analyze_streaks(df: pd.DataFrame):
    """Gap statistics (number of draws between appearances) for each number.

    Returns (white_stats, mega_stats): dicts mapping number -> dict with
    mean_gap, std_gap, max_gap, min_gap.  Numbers seen fewer than twice
    have no measurable gap and are omitted.
    """
    white_gaps = {n: [] for n in range(1, WHITE_MAX + 1)}
    mega_gaps = {n: [] for n in range(1, MEGA_MAX + 1)}
    white_last = {n: None for n in range(1, WHITE_MAX + 1)}
    mega_last = {n: None for n in range(1, MEGA_MAX + 1)}
    # Use an explicit draw position instead of the DataFrame index label so
    # gaps stay correct even if a caller passes a frame with a non-0..n-1
    # index (the original relied on the label from iterrows()).
    for pos, (_, r) in enumerate(df.iterrows()):
        # White balls
        for n in (int(r[k]) for k in ("w1", "w2", "w3", "w4", "w5")):
            if white_last[n] is not None:
                white_gaps[n].append(pos - white_last[n])
            white_last[n] = pos
        # Powerball
        m = int(r["mega"])
        if mega_last[m] is not None:
            mega_gaps[m].append(pos - mega_last[m])
        mega_last[m] = pos

    def _summarize(gaps: dict) -> dict:
        # One summary row per number that appeared at least twice.
        return {
            n: {
                "mean_gap": np.mean(g),
                "std_gap": np.std(g),
                "max_gap": max(g),
                "min_gap": min(g),
            }
            for n, g in gaps.items()
            if g
        }

    return _summarize(white_gaps), _summarize(mega_gaps)
# NEW: Pair correlation strength
def pair_correlation_strength(df: pd.DataFrame, top_n: int = 20):
    """Rank white-ball pairs by deviation from independent co-occurrence.

    Counts how often every unordered pair of white balls lands in the same
    draw, compares that to an independence approximation, and returns the
    top_n pairs sorted by |observed/expected - 1| descending, as tuples
    (a, b, observed, expected, ratio).
    """
    co_counts = np.zeros((WHITE_MAX + 1, WHITE_MAX + 1), dtype=float)
    singles = np.zeros(WHITE_MAX + 1)
    n_draws = len(df)
    for _, row in df.iterrows():
        balls = sorted(int(row[k]) for k in ("w1", "w2", "w3", "w4", "w5"))
        for num in balls:
            singles[num] += 1
        for i in range(5):
            for j in range(i + 1, 5):
                a, b = balls[i], balls[j]
                co_counts[a, b] += 1
                co_counts[b, a] += 1
    results = []
    for a in range(1, WHITE_MAX + 1):
        for b in range(a + 1, WHITE_MAX + 1):
            observed = co_counts[a, b]
            # Independence approximation: marginal P(a) * P(b), scaled by
            # the 10 unordered pairs available in each 5-ball draw.
            p_a = singles[a] / (n_draws * 5)
            p_b = singles[b] / (n_draws * 5)
            expected = n_draws * 10 * p_a * p_b
            if expected > 0:
                results.append((a, b, observed, expected, observed / expected))
    results.sort(key=lambda t: abs(t[4] - 1), reverse=True)
    return results[:top_n]
def cooccurrence_matrix(df: pd.DataFrame) -> np.ndarray:
    """Symmetric (WHITE_MAX+1)^2 co-occurrence counts for white balls.

    Off-diagonal [a, b] holds how many draws contained both a and b; the
    diagonal [n, n] holds the number of draws containing n at all.
    Index 0 is unused padding.
    """
    matrix = np.zeros((WHITE_MAX + 1, WHITE_MAX + 1), dtype=float)
    for _, row in df.iterrows():
        balls = sorted(int(row[k]) for k in ("w1", "w2", "w3", "w4", "w5"))
        for i in range(5):
            for j in range(i + 1, 5):
                a, b = balls[i], balls[j]
                matrix[a, b] += 1
                matrix[b, a] += 1
    white_cols = df[["w1", "w2", "w3", "w4", "w5"]]
    for n in range(1, WHITE_MAX + 1):
        # Diagonal is a plain appearance count, not a pair count.
        matrix[n, n] = (white_cols == n).any(axis=1).sum()
    return matrix
def days_since_last_seen(df: pd.DataFrame):
    """Days between each number's most recent appearance and the newest draw.

    Returns (white_gap, mega_gap): dicts mapping number -> days since it was
    last drawn, or None for numbers that never appear in `df`.
    """
    last_seen = {n: None for n in range(1, WHITE_MAX + 1)}
    mega_last = {n: None for n in range(1, MEGA_MAX + 1)}
    # Rows are assumed date-ascending (both loaders sort), so the final
    # assignment per number is its most recent sighting.
    for _, r in df.iterrows():
        d = r["date"]
        for n in [int(r[k]) for k in ["w1", "w2", "w3", "w4", "w5"]]:
            last_seen[n] = d
        mega_last[int(r["mega"])] = d
    t_max = df["date"].max()
    # Explicit `is not None` (the original used truthiness on a timestamp,
    # which is the wrong idiom for a presence check).
    white_gap = {
        n: (t_max - last_seen[n]).days if last_seen[n] is not None else None
        for n in range(1, WHITE_MAX + 1)
    }
    mega_gap = {
        n: (t_max - mega_last[n]).days if mega_last[n] is not None else None
        for n in range(1, MEGA_MAX + 1)
    }
    return white_gap, mega_gap
# IMPROVED: Balanced ticket generation using constraint satisfaction
def generate_balanced_tickets(n=10, seed=123):
    """Generate n tickets with one white ball from each fifth of 1-69.

    The five bands are disjoint, so a ticket can never contain duplicate
    whites.  The Powerball simply cycles 1..MEGA_MAX across tickets.
    Returns a list of (sorted_white_tuple, powerball) pairs.
    """
    rng = np.random.default_rng(seed)
    tickets = []
    # Five disjoint bands covering 1..69 for guaranteed spread.
    ranges = [
        list(range(1, 15)),   # 1-14
        list(range(15, 29)),  # 15-28
        list(range(29, 43)),  # 29-42
        list(range(43, 57)),  # 43-56
        list(range(57, 70)),  # 57-69
    ]
    for i in range(n):
        # int() cast: rng.choice returns numpy scalars, which previously
        # leaked into the output tuples and can surprise serialization code.
        whites = tuple(sorted(int(rng.choice(band)) for band in ranges))
        # Cycle through Powerballs so consecutive tickets never share one.
        mega = (i % MEGA_MAX) + 1
        tickets.append((whites, mega))
    return tickets
def sample_tickets_from_probs(
    wprob: np.ndarray, mprob: np.ndarray, k_tickets=10, seed=123
):
    """Sample k_tickets weighted random tickets from probability tables.

    Whites are drawn without replacement using wprob (1-indexed); the
    Powerball is drawn from mprob.  Duplicate tickets are removed while
    preserving first-seen order, so fewer than k_tickets may be returned.
    """
    # RandomState (not default_rng) is kept deliberately: existing seeds
    # must keep producing the same tickets.
    rng = np.random.RandomState(seed)
    whites = np.arange(1, WHITE_MAX + 1)
    megas = np.arange(1, MEGA_MAX + 1)
    drawn = []
    for _ in range(k_tickets):
        w = tuple(
            sorted(
                rng.choice(whites, size=5, replace=False, p=wprob[1:] / wprob[1:].sum())
            )
        )
        m = int(
            rng.choice(megas, size=1, replace=False, p=mprob[1:] / mprob[1:].sum())[0]
        )
        drawn.append((w, m))
    # dict preserves insertion order: order-stable de-duplication.
    return list(dict.fromkeys(drawn))
def gridify_1d(vec_1idx: np.ndarray, rows: int, cols: int) -> np.ndarray:
    """Reshape a 1-indexed vector (slot 0 unused) into a rows x cols grid.

    Used to lay frequency tables out for heat-map display.
    Raises ValueError when the usable length isn't exactly rows * cols.
    """
    body = vec_1idx[1:].copy()
    if body.size != rows * cols:
        raise ValueError("Grid shape mismatch")
    return body.reshape(rows, cols)
def df_to_csv_bytes(df: pd.DataFrame) -> bytes:
    """Serialize a DataFrame to UTF-8 CSV bytes, without the index column."""
    csv_text = df.to_csv(index=False)
    return csv_text.encode("utf-8")
def fig_to_png_bytes(fig) -> bytes:
    """Render a matplotlib figure to PNG bytes and close it to free memory."""
    buffer = io.BytesIO()
    fig.savefig(buffer, format="png", dpi=160, bbox_inches="tight")
    plt.close(fig)
    return buffer.getvalue()
# ---------- Sidebar ----------
# User-tunable knobs; widget values flow into the analysis on every rerun.
st.sidebar.title("Controls")
data_src = st.sidebar.radio("Data source", ["Auto-download", "Upload CSV"])
years = st.sidebar.slider("Years of history (auto)", 1, 20, 5)
half_life = st.sidebar.slider("Recency half-life (days)", 30, 365, 180, step=15)
num_tix = st.sidebar.slider("Candidate tickets", 5, 30, 12, step=1)
seed = st.sidebar.number_input(
    "Random seed",
    value=123,
    step=1,
    key="random_seed",
    help="Change this value to generate different number combinations. Press Enter or click outside the field to update.",
)
# Off by default: the 69x69 heatmap is the most expensive plot on the page.
show_cooccur = st.sidebar.checkbox(
    "Show pair co-occurrence heatmap (69×69)", value=False
)
# An uploaded CSV, when present, takes precedence over the auto-download.
uploaded_df = None
if data_src == "Upload CSV":
    up = st.sidebar.file_uploader("CSV (date,w1..w5,mega)", type=["csv"])
    if up is not None:
        uploaded_df = load_uploaded(up.read())
# ---------- Load Data ----------
# Prefer the uploaded CSV; otherwise fall back to the cached NY Open Data download.
with st.spinner("Loading data…"):
    if uploaded_df is not None:
        df = uploaded_df
    else:
        df = fetch_history_years(years)
st.title("Powerball • Advanced Statistical Analysis")
st.caption(
    "⚠️ **Educational Tool**: Lottery draws are truly random. Past patterns cannot predict future outcomes."
)
st.markdown(
    f"**Draws loaded:** {len(df)} (from {df['date'].min().date()} to {df['date'].max().date()})"
)
# ---------- Analysis ----------
# Two frequency views: plain (every draw counts equally) and recency-weighted
# (exponential decay controlled by the sidebar half-life slider).
w_plain, m_plain = make_freq_tables(df, None)
weights = recency_weights(df["date"], half_life)
w_recent, m_recent = make_freq_tables(df, weights)
# Rankings: (number, probability) tuples, most frequent first.
whites_rank_plain = sorted(
    [(i, float(w_plain[i])) for i in range(1, WHITE_MAX + 1)],
    key=lambda x: x[1],
    reverse=True,
)
whites_rank_recent = sorted(
    [(i, float(w_recent[i])) for i in range(1, WHITE_MAX + 1)],
    key=lambda x: x[1],
    reverse=True,
)
mega_rank_recent = sorted(
    [(i, float(m_recent[i])) for i in range(1, MEGA_MAX + 1)],
    key=lambda x: x[1],
    reverse=True,
)
white_gap, mega_gap = days_since_last_seen(df)
# NEW: Advanced analysis
randomness_test = test_randomness(df)
white_streaks, mega_streaks = analyze_streaks(df)
top_pairs = pair_correlation_strength(df, top_n=15)
# ---------- Layout ----------
tab_labels = [
    "Overview",
    "Randomness Tests",
    "Heat Maps",
    "Advanced Analysis",
    "Tickets",
    "Exports",
]
# The Virtual Seeds tab only exists when the optional Seeds module imported.
if SEEDS_AVAILABLE:
    tab_labels.append("Virtual Seeds")
tab1, tab2, tab3, tab4, tab5, tab6, *rest_tabs = st.tabs(tab_labels)
if SEEDS_AVAILABLE:
    tab7 = rest_tabs[0]
with tab1:
    # Overview tab: side-by-side frequency rankings plus "days since seen".
    c1, c2 = st.columns(2)
    with c1:
        st.subheader("High-Probability List (Plain Frequency)")
        df_plain = pd.DataFrame(whites_rank_plain[:20], columns=["white", "plain_prob"])
        st.dataframe(df_plain, use_container_width=True, hide_index=True)
    with c2:
        st.subheader("High-Probability List (Recency-Weighted)")
        df_recent = pd.DataFrame(
            whites_rank_recent[:20], columns=["white", "recency_prob"]
        )
        st.dataframe(df_recent, use_container_width=True, hide_index=True)
    st.subheader("Powerball (Recency-Weighted)")
    df_mega = pd.DataFrame(mega_rank_recent[:10], columns=["powerball", "recency_prob"])
    st.dataframe(df_mega, use_container_width=True, hide_index=True)
    st.subheader("Hot / Cold (Days Since Seen)")
    # Descending sort: the numbers absent longest ("coldest") come first.
    cg = pd.DataFrame(
        {"white": list(white_gap.keys()), "days_since_seen": list(white_gap.values())}
    ).sort_values("days_since_seen", ascending=False)
    st.dataframe(cg.head(20), use_container_width=True, hide_index=True)
with tab2:
    # Randomness tab: renders the chi-square results computed above.
    st.subheader("🔬 Statistical Randomness Tests")
    st.markdown("### Chi-Square Goodness of Fit")
    st.markdown(
        "Tests whether number frequencies deviate significantly from uniform distribution (true randomness)."
    )
    col1, col2 = st.columns(2)
    with col1:
        st.metric("White Balls χ² statistic", f"{randomness_test['white_chi2']:.2f}")
        st.metric("White Balls p-value", f"{randomness_test['white_pvalue']:.4f}")
        # Conventional 5% significance threshold.
        if randomness_test["white_pvalue"] > 0.05:
            st.success(
                "✅ White balls appear consistent with random distribution (p > 0.05)"
            )
        else:
            st.warning(
                "⚠️ White balls show statistically significant deviation from uniformity"
            )
    with col2:
        st.metric("Powerball χ² statistic", f"{randomness_test['mega_chi2']:.2f}")
        st.metric("Powerball p-value", f"{randomness_test['mega_pvalue']:.4f}")
        if randomness_test["mega_pvalue"] > 0.05:
            st.success(
                "✅ Powerball appears consistent with random distribution (p > 0.05)"
            )
        else:
            st.warning(
                "⚠️ Powerball shows statistically significant deviation from uniformity"
            )
    st.info(
        "**Interpretation**: High p-values (> 0.05) indicate the lottery behaves as expected for a truly random system."
    )
with tab3:
    # Heat-map tab.  PNG bytes for the Exports tab are captured with a direct
    # savefig BEFORE st.pyplot(..., clear_figure=True) wipes each figure; the
    # original captured them afterwards, which exported blank images.
    def _fig_png(fig) -> bytes:
        # Render to PNG without closing or clearing the figure.
        buf = io.BytesIO()
        fig.savefig(buf, format="png", dpi=160, bbox_inches="tight")
        return buf.getvalue()

    st.subheader("White Ball Frequency — Plain")
    grid_p = gridify_1d(w_plain, 3, 23)  # 69 whites laid out as 3x23
    fig1, ax1 = plt.subplots(figsize=(10, 4))
    im1 = ax1.imshow(grid_p, aspect="auto", cmap="viridis")
    ax1.set_title("White Frequency (Plain)")
    fig1.colorbar(im1)
    img_plain = _fig_png(fig1)
    st.pyplot(fig1, clear_figure=True)
    plt.close(fig1)

    st.subheader("White Ball Frequency — Recency-Weighted")
    grid_r = gridify_1d(w_recent, 3, 23)
    fig2, ax2 = plt.subplots(figsize=(10, 4))
    im2 = ax2.imshow(grid_r, aspect="auto", cmap="plasma")
    ax2.set_title("White Frequency (Recency-Weighted)")
    fig2.colorbar(im2)
    img_recent = _fig_png(fig2)
    st.pyplot(fig2, clear_figure=True)
    plt.close(fig2)

    st.subheader("Powerball (Recency-Weighted) — Bar")
    fig3, ax3 = plt.subplots(figsize=(10, 4))
    xs = np.arange(1, MEGA_MAX + 1)
    ax3.bar(xs, m_recent[1:], color="coral")
    ax3.set_xlabel("Powerball Number")
    ax3.set_ylabel("Weight")
    ax3.set_title("Powerball Recency-Weighted Frequency")
    ax3.grid(axis="y", alpha=0.3)
    img_mega = _fig_png(fig3)
    st.pyplot(fig3, clear_figure=True)
    plt.close(fig3)
with tab4:
    # Advanced-analysis tab: pair correlations, gap stats, optional heatmap.
    st.subheader("🔍 Advanced Pattern Analysis")
    st.markdown("### Strongest Pair Correlations")
    st.markdown("Pairs that appear together more/less than expected by chance:")
    pair_df = pd.DataFrame(
        top_pairs, columns=["Num1", "Num2", "Observed", "Expected", "Ratio"]
    )
    pair_df["Deviation"] = ((pair_df["Ratio"] - 1) * 100).round(1)
    st.dataframe(
        pair_df[["Num1", "Num2", "Observed", "Expected", "Deviation"]].head(10),
        use_container_width=True,
        hide_index=True,
    )
    st.caption("Deviation shows % above/below expected if truly independent")
    st.markdown("### Streak Statistics (Top 10 Most Variable)")
    streak_data = []
    # Loop variable renamed from `stats` to `gap_stats`: the original shadowed
    # the module-level `from scipy import stats`, breaking any later code that
    # relies on scipy's `stats` at module scope.
    for n, gap_stats in white_streaks.items():
        streak_data.append(
            {
                "Number": n,
                "Mean Gap": round(gap_stats["mean_gap"], 1),
                "Std Gap": round(gap_stats["std_gap"], 1),
                "Max Gap": gap_stats["max_gap"],
                "Min Gap": gap_stats["min_gap"],
            }
        )
    streak_df = pd.DataFrame(streak_data).sort_values("Std Gap", ascending=False)
    st.dataframe(streak_df.head(10), use_container_width=True, hide_index=True)
    if show_cooccur:
        st.subheader("White Ball Pair Co-Occurrence Heatmap")
        with st.spinner("Computing co-occurrence…"):
            C = cooccurrence_matrix(df)
        fig4, ax4 = plt.subplots(figsize=(12, 10))
        im4 = ax4.imshow(C[1:, 1:], aspect="auto", cmap="coolwarm")
        ax4.set_title("White Pair Co-Occurrence (1..69)")
        ax4.set_xlabel("White Ball Number")
        ax4.set_ylabel("White Ball Number")
        fig4.colorbar(im4)
        # Capture export bytes BEFORE clear_figure=True wipes the figure;
        # capturing afterwards (as the original did) exports a blank image.
        buf4 = io.BytesIO()
        fig4.savefig(buf4, format="png", dpi=160, bbox_inches="tight")
        img_co = buf4.getvalue()
        st.pyplot(fig4, clear_figure=True)
        plt.close(fig4)
    else:
        img_co = None
        st.info("Enable 69×69 heatmap in sidebar (computationally intensive)")
with tab5:
st.subheader("🎫 Candidate Tickets")
st.warning(
"⚠️ **Remember**: All lottery combinations have EQUAL probability. These are for entertainment only."
)
# ============================================================
# TOP 10 GAMES - ADVANCED STATISTICAL ANALYSIS
# ============================================================
def generate_statistical_games(n_games=10, seed_value=123):
    """
    Generate games using multiple statistical factors:
    - Recency weighting
    - Historical frequency
    - Streak consistency
    - Pair correlations
    - Range balancing

    Reads the script-level analysis results (white_streaks, top_pairs,
    whites_rank_recent, whites_rank_plain, mega_rank_recent).
    Returns a list of (sorted_white_tuple, powerball) pairs.
    """
    games = []
    # Numbers with consistent gap patterns.  Loop variable renamed from
    # `stats` so it no longer shadows scipy's `stats` module in this scope.
    consistent_nums = []
    for n, gap_stats in white_streaks.items():
        if gap_stats["std_gap"] < 8:  # Low variability = consistent
            consistent_nums.append(n)
    consistent_nums = consistent_nums[:20]
    # Numbers drawn from the strongest positively-correlated pairs.
    pair_nums = set()
    for n1, n2, _, _, ratio in top_pairs[:10]:
        if ratio > 1.0:  # Pairs that appear together more than expected
            pair_nums.add(n1)
            pair_nums.add(n2)
    pair_nums = list(pair_nums)[:15]
    # Combine all factors into one additive score per white ball.
    number_scores = np.zeros(WHITE_MAX + 1)
    # Score based on recency (highest weight)
    for i, (n, prob) in enumerate(whites_rank_recent[:30]):
        number_scores[n] += (30 - i) * 3.0
    # Score based on plain frequency
    for i, (n, prob) in enumerate(whites_rank_plain[:30]):
        number_scores[n] += (30 - i) * 2.0
    # Bonus for consistent patterns
    for n in consistent_nums:
        number_scores[n] += 15
    # Bonus for strong pair correlations
    for n in pair_nums:
        number_scores[n] += 10
    # Get top scored numbers
    top_scored = sorted(
        range(1, WHITE_MAX + 1), key=lambda x: number_scores[x], reverse=True
    )
    # Generate diverse games from top scored numbers
    rng = np.random.default_rng(seed_value)
    for game_idx in range(n_games):
        # Widen the candidate pool for later games so tickets differ.
        pool_size = min(30 + game_idx * 3, 50)
        pool = top_scored[:pool_size]
        # Ensure balanced coverage across ranges
        selected = []
        ranges = [
            [n for n in pool if 1 <= n <= 14],
            [n for n in pool if 15 <= n <= 28],
            [n for n in pool if 29 <= n <= 42],
            [n for n in pool if 43 <= n <= 56],
            [n for n in pool if 57 <= n <= 69],
        ]
        # One pick per populated band; bands are disjoint so no duplicates.
        for r in ranges:
            if r and len(selected) < 5:
                # int() cast keeps numpy scalars out of the output tuples.
                selected.append(int(rng.choice(r)))
        # Top-up from the pool if some bands were empty.
        while len(selected) < 5:
            candidates = [n for n in pool if n not in selected]
            if candidates:
                selected.append(candidates[0])
            else:
                break
        whites = tuple(sorted(selected[:5]))
        # Powerball rotates through the top 8 by recency.
        top_pb = [n for n, _ in mega_rank_recent[:8]]
        powerball = top_pb[game_idx % len(top_pb)]
        games.append((whites, powerball))
    return games
# Display section for Top 10 Statistical Games
st.markdown("---")
st.markdown("### 📊 Top 10 Games - Advanced Statistical Analysis")
st.markdown(
"**Based on:** Recency weighting, frequency analysis, pair correlations, and streak patterns"
)
# Regenerate button to force update when seed changes
col1, col2 = st.columns([3, 1])
with col1:
st.caption(
f"Current seed: **{seed}** - Change seed in sidebar and numbers will update automatically"
)
with col2:
if st.button("🔄 Regenerate", use_container_width=True, key="regenerate_stats"):
st.rerun()
# Generate the games (using current seed value - pass explicitly for reactivity)
statistical_games = generate_statistical_games(n_games=10, seed_value=seed)
# Create DataFrame for clean display
df_statistical = pd.DataFrame(
[
{
"Game": i + 1,
"White Balls": f"{w[0]:2d} - {w[1]:2d} - {w[2]:2d} - {w[3]:2d} - {w[4]:2d}",
"Powerball": f"{m:02d}",
"Raw": f"{w[0]}, {w[1]}, {w[2]}, {w[3]}, {w[4]}, PB:{m}",
}
for i, (w, m) in enumerate(statistical_games)
]
)
# Display the table (key includes seed to force update when seed changes)
st.dataframe(
df_statistical[["Game", "White Balls", "Powerball"]],
use_container_width=True,
hide_index=True,
key=f"statistical_games_{seed}",
)
# Methodology explanation
with st.expander("📖 How these games were selected"):
st.markdown(
"""
**Statistical Factors Used:**
1. **Recency Weight (3x)**: Numbers that appeared recently get highest priority
- Recent draws weighted exponentially more than old draws
2. **Historical Frequency (2x)**: Numbers that appear most often overall
- All-time frequency across entire dataset
3. **Consistency Bonus (+15 points)**: Numbers with predictable gap patterns
- Low standard deviation in gaps between appearances
4. **Pair Correlation Bonus (+10 points)**: Numbers that appear together more than expected
- Based on chi-square analysis of pair frequencies
5. **Range Balancing**: Ensures coverage across all number ranges
- One number from each: 1-14, 15-28, 29-42, 43-56, 57-69
**Powerball Selection:** Rotates through top 8 most frequent Powerballs by recency
**Scoring Formula:**
```
Score = (Recency_Rank × 3) + (Frequency_Rank × 2) + Consistency_Bonus + Pair_Bonus
```
⚠️ **Critical Reminder**: These are statistically-informed selections for entertainment only.
In reality, every combination has exactly the same **1 in 292,201,338** probability.
Past patterns cannot predict future random draws.
"""
)
# Download button
st.download_button(
"⬇️ Download Statistical Games (CSV)",
data=df_to_csv_bytes(
df_statistical[["Game", "Raw"]].rename(columns={"Raw": "Numbers"})
),
file_name="powerball_statistical_games.csv",
use_container_width=True,
)
st.info(
"💡 **Tip**: Change the 'Random seed' in the sidebar to generate different game combinations using the same statistical criteria."
)
# ============================================================
# VIRTUAL SEEDS METHOD SELECTOR
# ============================================================
if SEEDS_AVAILABLE:
st.markdown("---")
st.markdown("### 🧬 Tickets from Virtual Seeds Methods")
st.markdown(
"**Generate tickets using pattern-based seed prediction methods from the Virtual Seeds analysis.**"
)
# Method selection
col1, col2 = st.columns([2, 1])
with col1:
signature_type = st.selectbox(
"Signature Method:",
[
"master_signature",
"sum_signature",
"odd_even_signature",
"low_high_signature",
"prime_signature",
"modulo_signature",
],
help="Different ways to convert draw numbers into a virtual seed",
)
with col2:
vs_lookback = st.slider(
"Lookback draws:",
30,
200,
100,
step=10,
help="Number of recent draws to analyze for patterns",
)
# Get predictions
with st.spinner("Analyzing patterns and predicting seeds..."):
seed_df = assign_virtual_seeds_to_history(df, use_signature=signature_type)
patterns = detect_seed_patterns(seed_df, lookback=vs_lookback)
predictions = predict_next_seed_from_patterns(
seed_df.tail(vs_lookback), patterns
)
if predictions:
# Method selector
available_methods = list(predictions.keys())
selected_method = st.selectbox(
"Choose Prediction Method:",
available_methods,
help="Each method predicts a virtual seed based on different pattern analysis. "
"Tickets will be generated using that predicted seed.",
)
if selected_method:
pred_seed = predictions[selected_method]
# Get historical error statistics for this method
with st.spinner("Calculating historical error statistics..."):
bt_df = backtest_seed_methods(
seed_df,
min_lookback=min(50, vs_lookback),
step=max(1, len(seed_df) // 100),
)
# Calculate average error for this specific method
method_error_col = f"err_{selected_method}"
if not bt_df.empty and method_error_col in bt_df.columns:
avg_error = bt_df[method_error_col].mean()
median_error = bt_df[method_error_col].median()
min_error = bt_df[method_error_col].min()
max_error = bt_df[method_error_col].max()
method_wins = (bt_df["best_method"] == selected_method).sum()
total_tests = len(bt_df)
win_rate = (
(method_wins / total_tests * 100) if total_tests > 0 else 0
)
else:
avg_error = None
median_error = None
min_error = None
max_error = None
win_rate = None
# Display prediction and error info
col1, col2 = st.columns([2, 1])
with col1:
error_info = (
f"**{selected_method}** predicted seed: **{pred_seed}**"
)
if avg_error is not None:
error_info += "\n\n📊 **Historical Performance:**\n"
error_info += f"- Average error: **{avg_error:.1f}** (out of 0-9999 range)\n"
error_info += f"- Median error: **{median_error:.1f}**\n"
error_info += (
f"- Error range: {min_error:.0f} - {max_error:.0f}\n"
)
error_info += f"- Win rate: **{win_rate:.1f}%** ({method_wins}/{total_tests} draws)"
st.info(error_info)
with col2:
st.caption(f"Using randomization seed: {seed}")
# Generate tickets using the predicted seed
num_vs_tickets = st.slider(
"Number of tickets per seed variant:",
1,
10,
3,
step=1,
key="vs_tickets_count",
)
# Generate tickets with error adjustments
seed_variants = {}
# Base prediction
seed_variants[f"Base ({pred_seed})"] = pred_seed
# Error-adjusted seeds
if avg_error is not None:
# Add error (predicted + average error)
seed_plus_error = int((pred_seed + avg_error) % 10000)
seed_variants[f"+Error ({seed_plus_error})"] = seed_plus_error
# Subtract error (predicted - average error)
seed_minus_error = int((pred_seed - avg_error) % 10000)
seed_variants[f"-Error ({seed_minus_error})"] = seed_minus_error
# Half error adjustments
seed_plus_half = int((pred_seed + avg_error / 2) % 10000)
seed_variants[f"+Half Error ({seed_plus_half})"] = seed_plus_half
seed_minus_half = int((pred_seed - avg_error / 2) % 10000)
seed_variants[f"-Half Error ({seed_minus_half})"] = seed_minus_half
else:
# If no error data, just show base
pass
# Display option to show/hide error variants
show_error_variants = st.checkbox(
"Show error-adjusted seed variants",
value=True,
help="Generate tickets using predicted seed ± error adjustments based on historical performance",
)
if show_error_variants and avg_error is not None:
variants_to_show = seed_variants
else:
variants_to_show = {f"Base ({pred_seed})": pred_seed}
# Generate and display tickets for each variant
all_tickets_data = []
for variant_name, variant_seed in variants_to_show.items():
vs_tickets = sample_tickets_from_probs(
w_recent,
m_recent,
k_tickets=num_vs_tickets,
seed=variant_seed,
)
for i, (w, m) in enumerate(vs_tickets):
all_tickets_data.append(
{
"Variant": variant_name,
"Ticket": i + 1,
"White Balls": f"{w[0]:2d} - {w[1]:2d} - {w[2]:2d} - {w[3]:2d} - {w[4]:2d}",
"Powerball": f"{m:02d}",
"Raw": f"{w[0]}, {w[1]}, {w[2]}, {w[3]}, {w[4]}, PB:{m}",
}
)
df_vs = pd.DataFrame(all_tickets_data)
# Display tickets
if len(variants_to_show) > 1:
# Show with variant column
st.dataframe(
df_vs[["Variant", "Ticket", "White Balls", "Powerball"]],
use_container_width=True,
hide_index=True,
key=f"vs_tickets_{selected_method}_{pred_seed}",
)
else:
# Show without variant column
st.dataframe(
df_vs[["Ticket", "White Balls", "Powerball"]],
use_container_width=True,
hide_index=True,
key=f"vs_tickets_{selected_method}_{pred_seed}",
)
# Download button
st.download_button(
f"⬇️ Download {selected_method} Tickets (CSV)",
data=df_to_csv_bytes(
df_vs[["Ticket", "Raw"]].rename(columns={"Raw": "Numbers"})
),
file_name=f"tickets_{selected_method.lower().replace(' ', '_')}.csv",
use_container_width=True,
)
with st.expander(f"📖 About {selected_method}"):
method_descriptions = {
"Linear Trend": "Extrapolates seed values based on a linear trend detected in recent draws.",
"Oscillation": "Predicts seed based on detected sine wave patterns in seed evolution.",
"Cycle-": "Uses repeating cycle patterns - looks back one cycle length to predict next seed.",
"Autocorr-": "Uses autocorrelation - predicts seed based on correlation with previous seeds at specific lags.",
"Range Pattern": "Predicts seed within the most frequently occurring seed range.",
"Ensemble": "Weighted average of all available methods, weighted by their pattern strength.",
}
desc = next(
(
v
for k, v in method_descriptions.items()
if selected_method.startswith(k)
),
"Pattern-based seed prediction method.",
)
st.markdown(f"**{selected_method}**: {desc}")
st.markdown(
f"**Predicted Seed**: {pred_seed} \n"
f"**Pattern Strength**: {patterns.get('linear_trend', {}).get('strength', 0)*100:.1f}% (if applicable)"
)
else:
st.warning(
"No strong patterns detected. Try adjusting the lookback period or signature method."
)
st.markdown("---")
st.markdown("### 🎲 Other Ticket Generation Methods")
# Generate tickets at tab level so they're accessible for exports