-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathapp.py
More file actions
1101 lines (927 loc) · 40.4 KB
/
app.py
File metadata and controls
1101 lines (927 loc) · 40.4 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
988
989
990
991
992
993
994
995
996
997
998
999
1000
from __future__ import annotations
import os
from pathlib import Path
import re
import pandas as pd
import streamlit as st
from scripts.run_webinar_neoserra_match import (
run_webinar_neoserra_match,
)
from scripts.center_loading import CENTERS_PATH
from scripts.name_cleaning import find_name_collisions
from scripts.overwriting import (
create_people_overwrite_from_collisions,
update_people_overwrite_with_new_collisions,
get_unreviewed_overwrite_rows,
apply_attendance_removals_from_people_overwrite,
apply_people_overwrites,
)
from scripts.neoserra_helper import add_zip_geography
from scripts.kpis import generate_webinar_kpis
from scripts.attendance_plots import (
prepare_webinar_kpis_for_plotting,
get_default_plot_style,
make_attendance_counts_figure,
make_engagement_rate_figure,
plot_audience_participation_stacked,
plot_attendance_composition,
plot_client_composition_per_webinar,
)
from scripts.center_mapping import (
map_centers_for_nonclients,
map_centers_for_clients,
make_all_attendees_zip_map_single_colored,
map_centers_for_run_clients,
)
from scripts.center_splitting import (
build_latest_attended_center_reports,
_to_date_series,
)
import matplotlib.pyplot as plt
# ============================
# Page config + light styling
# ============================
st.set_page_config(page_title="SmallBiz Talks", layout="wide")
with st.sidebar:
    # Hard-exits the whole Python process; Streamlit offers no graceful
    # shutdown API for a locally-run app.
    if st.button("Quit App"):
        os._exit(0)
# Inject a small CSS override: tighter page padding, capped content width,
# slightly muted metric labels, and a hidden default footer.
st.markdown(
    """
    <style>
    .block-container { padding-top: 1.2rem; padding-bottom: 2rem; max-width: 1300px; }
    h1, h2, h3 { letter-spacing: -0.01em; }
    [data-testid="stMetricLabel"] > div { font-size: 0.9rem; opacity: 0.85; }
    .stAlert > div { padding-top: 0.65rem; padding-bottom: 0.65rem; }
    footer { visibility: hidden; }
    </style>
    """,
    unsafe_allow_html=True,
)
st.title("SmallBiz Talks — Webinar → NeoSerra Match")
st.caption("Runs locally on your computer. Files stay on this machine.")
# ============================
# Session state init
# ============================
# Seed every session_state key used below so reruns never hit a missing key.
if "batch_df" not in st.session_state:
    # pandas DataFrame of per-file batch results, or None before any run
    st.session_state.batch_df = None
if "success_runs" not in st.session_state:
    # list[dict] where each dict holds per-success-run artifacts
    st.session_state.success_runs = []
if "output_paths" not in st.session_state:
    # output CSV paths (stored as str) produced by the last batch
    st.session_state.output_paths = []
if "last_run_meta" not in st.session_state:
    # paths/metadata captured for the most recent pipeline run
    st.session_state.last_run_meta = {}
if "center_report_dates" not in st.session_state:
    # remembered date selection for the Center Reports tab
    st.session_state.center_report_dates = []
# ============================
# Helpers
# ============================
def _write_upload_to_disk(
    upload: st.runtime.uploaded_file_manager.UploadedFile, dest: Path
) -> Path:
    """Persist a Streamlit upload at *dest* and return that path.

    The pipeline consumes file paths rather than in-memory uploads, so the
    bytes are materialized on disk first; missing parent directories are
    created along the way.
    """
    parent_dir = dest.parent
    parent_dir.mkdir(parents=True, exist_ok=True)
    payload = upload.getvalue()
    dest.write_bytes(payload)
    return dest
def _fmt_int(x) -> str:
try:
return f"{int(x):,}"
except Exception:
return "—"
def _parse_webinar_filename(name: str) -> dict:
"""
Best-effort parse:
attendee_{webinar_id}_YYYY_MM_DD(.csv)
Returns dict with webinar_id, webinar_date, and ok flag.
"""
m = re.search(r"attendee_(?P<id>\d+?)_(?P<y>\d{4})_(?P<m>\d{2})_(?P<d>\d{2})", name)
if not m:
return {"ok": False, "webinar_id": None, "webinar_date": None}
webinar_id = m.group("id")
webinar_date = f"{m.group('y')}-{m.group('m')}-{m.group('d')}"
return {"ok": True, "webinar_id": webinar_id, "webinar_date": webinar_date}
def render_and_close(fig):
    """Render *fig* in the Streamlit app, then close it so matplotlib does
    not accumulate open figures across reruns."""
    st.pyplot(fig, clear_figure=True)
    plt.close(fig)
def save_fig_overwrite(fig, path: Path, dpi: int = 150) -> None:
    """
    Save a matplotlib figure to disk, overwriting any existing file.

    Missing parent directories are created first. NOTE: this deliberately
    does NOT close the figure — callers render the same figure afterwards
    via ``render_and_close``, which is responsible for closing it. (The
    previous docstring incorrectly claimed the figure was closed here.)
    """
    path.parent.mkdir(parents=True, exist_ok=True)
    fig.savefig(path, dpi=dpi, bbox_inches="tight")
def _safe_multiselect_defaults(
options: list, desired: list | None, *, fallback: str = "last"
) -> list:
"""
Ensure multiselect defaults are always valid:
- keep only values that exist in options
- if nothing remains, fall back to [options[-1]] or [options[0]]
"""
if not options:
return []
desired = desired or []
opt_set = set(options)
cleaned = [d for d in desired if d in opt_set]
if cleaned:
return cleaned
return [options[-1]] if fallback == "last" else [options[0]]
def _load_masters(
    people_master_path: str,
    attendance_master_path: str,
    *,
    base_path: Path,
):
    """
    Load the people and attendance tables, preferring FINAL (post-overwrite)
    outputs under ``base_path/outputs`` and falling back to the MASTER CSVs.

    Returns:
        ``(attendance_df, people_df, meta)`` where *meta* records which
        source ("FINAL" or "MASTER") and path each table came from. When
        either chosen file is missing, returns ``(None, None, meta)`` with
        null sources.
    """
    people_final_path, attendance_final_path = _final_paths(base_path)

    def _prefer_final(final: Path, master: str) -> Path:
        # FINAL wins whenever it exists on disk.
        return final if final.exists() else Path(master)

    ppl_path = _prefer_final(people_final_path, people_master_path)
    att_path = _prefer_final(attendance_final_path, attendance_master_path)

    if not ppl_path.exists() or not att_path.exists():
        return None, None, {"people_source": None, "attendance_source": None}

    people_df = pd.read_csv(ppl_path)
    attendance_df = pd.read_csv(att_path)
    meta = {
        "people_source": "FINAL" if ppl_path == people_final_path else "MASTER",
        "attendance_source": "FINAL" if att_path == attendance_final_path else "MASTER",
        "people_path": str(ppl_path),
        "attendance_path": str(att_path),
    }
    return attendance_df, people_df, meta
def _final_paths(base_path: Path) -> tuple[Path, Path]:
outputs_dir = base_path / "outputs"
return outputs_dir / "people_final.csv", outputs_dir / "attendance_final.csv"
# ============================
# Sidebar: Inputs + settings
# ============================
with st.sidebar:
    st.header("1) Upload files")
    # One or many Zoom attendee exports; the pipeline runs once per file.
    webinar_files = st.file_uploader(
        "Zoom webinar attendee CSV(s)",
        type=["csv"],
        accept_multiple_files=True,
        help="Upload one or many webinar attendee exports. The pipeline will run once per file.",
    )
    neoserra_file = st.file_uploader("NeoSerra clients CSV", type=["csv"])
    # Optional override of the centers list bundled with the app.
    centers_file = st.file_uploader(
        "Centers CSV (optional override)",
        type=["csv"],
        help="Leave blank to use the bundled centers.csv packaged with the app.",
    )
    st.caption(f"Default (bundled): {CENTERS_PATH}")
    st.divider()
    st.header("2) Output folder")
    base_dir = st.text_input(
        "Local output folder",
        value=str(Path.home() / "SmallBizTalksOutputs"),
        help="Where run outputs and review files will be saved.",
    )
    base_path = Path(base_dir)
    base_path.mkdir(parents=True, exist_ok=True)
    # Defaults
    people_master_default = base_path / "people_master.csv"
    attendance_master_default = base_path / "attendance_master.csv"
    cache_default = base_path / "zip_to_center_lookup.csv"
    # Advanced users can relocate the master/cache CSVs individually.
    with st.expander("Advanced paths (optional)", expanded=False):
        people_master_path = st.text_input(
            "people_master.csv", value=str(people_master_default)
        )
        attendance_master_path = st.text_input(
            "attendance_master.csv", value=str(attendance_master_default)
        )
        cache_path = st.text_input(
            "zip_to_center_lookup.csv (cache)", value=str(cache_default)
        )
    st.divider()
    st.header("Batch options")
    overwrite_outputs = st.toggle(
        "Overwrite output CSVs",
        value=False,
        help="If an output CSV already exists for a webinar file, overwrite it.",
    )
    continue_on_error = st.toggle(
        "Continue on error",
        value=True,
        help="If one file fails, keep going with the rest.",
    )
    st.divider()
    run_btn = st.button("Run pipeline", type="primary", width="stretch")
    # Optional: clear stored batch results
    clear_btn = st.button("Clear last batch results", width="stretch")
    if clear_btn:
        # Reset everything the batch run stores in session_state.
        st.session_state.batch_df = None
        st.session_state.success_runs = []
        st.session_state.output_paths = []
        st.session_state.last_run_meta = {}
        st.toast("Cleared last batch results.")
# ============================
# Main: Guided status
# ============================
st.divider()
# Five top-level tabs; each is populated in its own section below.
tab_run, tab_kpis, tab_reports, tab_maps, tab_review = st.tabs(
    ["Run Pipeline", "Dashboard (KPIs)", "Center Reports", "Maps", "Batch + Review"]
)
# -------------------------
# TAB 1: Run Pipeline
# -------------------------
with tab_run:
    st.subheader("Run pipeline")
    # Minimum inputs: at least one webinar export plus the NeoSerra CSV.
    ready = bool(webinar_files and neoserra_file)
    with st.container(border=True):
        st.subheader("How this works")
        # Four-step explainer rendered as columns.
        a, b, c, d = st.columns(4)
        a.markdown("**1. Upload**")
        a.caption("Webinar file(s) + NeoSerra + Centers")
        b.markdown("**2. Run**")
        b.caption("Matches each webinar to NeoSerra")
        c.markdown("**3. Update masters**")
        c.caption("People + attendance master CSVs")
        d.markdown("**4. Review**")
        d.caption("Invalid emails, collisions, enriched deltas")
    if ready:
        st.success(f"Ready. {len(webinar_files)} webinar file(s) uploaded.")
    else:
        st.info(
            "Upload **NeoSerra** and at least **one webinar file** in the sidebar."
        )
# ============================
# Run (batch) — stores results into session_state
# ============================
if run_btn:
    if not ready:
        st.error("Please upload NeoSerra, centers, and at least one webinar file.")
        st.stop()
    # Scratch dir where uploads are materialized as real files.
    run_dir = base_path / "_runs"
    run_dir.mkdir(parents=True, exist_ok=True)
    neoserra_path = _write_upload_to_disk(
        neoserra_file, run_dir / neoserra_file.name
    )
    # Centers: use bundled by default, but allow override upload
    if centers_file is not None:
        centers_path = _write_upload_to_disk(
            centers_file, run_dir / centers_file.name
        )
    else:
        centers_path = CENTERS_PATH
        # Only the bundled path needs an existence check; an uploaded
        # override was just written to disk above.
        if not centers_path.exists():
            st.error(f"Bundled centers.csv not found at: {centers_path}")
            st.stop()
    # Reset stored batch results (new run)
    st.session_state.batch_df = None
    st.session_state.success_runs = []
    st.session_state.output_paths = []
    st.session_state.last_run_meta = {
        "base_dir": str(base_path),
        "run_dir": str(run_dir),
        "neoserra_path": str(neoserra_path),
        "centers_path": str(centers_path),
        "people_master_path": str(people_master_path),
        "attendance_master_path": str(attendance_master_path),
        "cache_path": str(cache_path),
    }
    # Echo the resolved input/output paths before running.
    with st.container(border=True):
        st.subheader("Run plan")
        colL, colR = st.columns(2)
        with colL:
            st.markdown("**Inputs**")
            st.code(str(neoserra_path), language=None)
            st.code(str(centers_path), language=None)
        with colR:
            st.markdown("**Outputs**")
            st.code(str(Path(people_master_path)), language=None)
            st.code(str(Path(attendance_master_path)), language=None)
            st.code(str(Path(cache_path)), language=None)
    st.subheader("Batch run")
    progress = st.progress(0.0)
    status = st.empty()
    batch_rows: list[dict] = []
    output_paths: list[Path] = []
    total = len(webinar_files)
    # One pipeline invocation per uploaded webinar file.
    for i, wf in enumerate(webinar_files, start=1):
        meta = _parse_webinar_filename(wf.name)
        label_bits = [wf.name]
        if meta["ok"]:
            label_bits.append(
                f"(id={meta['webinar_id']} date={meta['webinar_date']})"
            )
        status.info(f"Running {i}/{total}: " + " ".join(label_bits))
        try:
            webinar_path = _write_upload_to_disk(wf, run_dir / wf.name)
            output_path = base_path / (Path(wf.name).stem + "_with_neoserra.csv")
            # skip if exists and not overwriting
            if output_path.exists() and not overwrite_outputs:
                batch_rows.append(
                    {
                        "webinar_file": wf.name,
                        "webinar_id": meta.get("webinar_id"),
                        "webinar_date": meta.get("webinar_date"),
                        "status": "skipped (output exists)",
                        "output_csv": str(output_path),
                    }
                )
                output_paths.append(output_path)
                progress.progress(i / total)
                continue
            with st.spinner(f"Processing: {wf.name}"):
                results = run_webinar_neoserra_match(
                    webinar_file=webinar_path,
                    neoserra_file=neoserra_path,
                    centers_file=centers_path,
                    output_path=output_path,
                    people_master_path=people_master_path,
                    attendance_master_path=attendance_master_path,
                    cache_path=cache_path,
                    print_summary=False,
                )
            summary = results["summary"]
            # store per-run review artifacts in session_state
            st.session_state.success_runs.append(
                {
                    "webinar_file": wf.name,
                    "output_path": str(output_path),
                    "summary": summary,
                    "results": {
                        "webinar_invalid_emails": results.get(
                            "webinar_invalid_emails"
                        ),
                        "people_name_collision_df": results.get(
                            "people_name_collision_df"
                        ),
                        "people_enriched_before": results.get(
                            "people_enriched_before"
                        ),
                        "people_enriched_after": results.get(
                            "people_enriched_after"
                        ),
                    },
                }
            )
            # getattr with a None default tolerates summary objects that
            # lack a given field (older pipeline versions, partial runs).
            batch_rows.append(
                {
                    "webinar_file": wf.name,
                    "webinar_id": meta.get("webinar_id"),
                    "webinar_date": meta.get("webinar_date"),
                    "status": "ok",
                    "session_rows": getattr(summary, "session_rows", None),
                    "unique_emails": getattr(
                        summary, "session_unique_emails", None
                    ),
                    "attendance_added": getattr(summary, "attendance_added", None),
                    "attendance_overwritten": getattr(
                        summary, "attendance_overwritten", None
                    ),
                    "people_new": getattr(summary, "people_new", None),
                    "people_enriched": getattr(summary, "people_enriched", None),
                    "output_csv": str(output_path),
                }
            )
            output_paths.append(output_path)
        except Exception as e:
            # Record the failure; optionally abort the rest of the batch.
            batch_rows.append(
                {
                    "webinar_file": wf.name,
                    "webinar_id": meta.get("webinar_id"),
                    "webinar_date": meta.get("webinar_date"),
                    "status": f"error: {type(e).__name__}",
                    "error": str(e),
                }
            )
            if not continue_on_error:
                status.error(f"Stopped on error ({wf.name}): {e}")
                break
        progress.progress(i / total)
    status.success("Batch complete.")
    st.session_state.batch_df = pd.DataFrame(batch_rows)
    st.session_state.output_paths = [str(p) for p in output_paths]
    # ============================
    # Post-batch: collisions -> overwrite -> finals
    # ============================
    with st.container(border=True):
        st.subheader("Post-batch review artifacts + final outputs")
        outputs_dir = base_path / "outputs"
        outputs_dir.mkdir(parents=True, exist_ok=True)
        overwrites_dir = base_path / "overwrites"
        overwrites_dir.mkdir(parents=True, exist_ok=True)
        # Sentinels so the meta-update blocks below can tell what ran.
        people_master_df = None
        attendance_master_df = None
        name_collisions_final_path = None
        collision_groups_final = None
        collision_rows_final = None
        unreviewed_count = None
        # Paths
        name_collisions_master_path = outputs_dir / "name_collisions_master.csv"
        people_overwrite_path = overwrites_dir / "people_overwrite.xlsx"
        people_final_path = outputs_dir / "people_final.csv"
        attendance_final_path = outputs_dir / "attendance_final.csv"
        # 1) Load masters produced by the batch loop
        if not Path(people_master_path).exists():
            st.warning(
                "people_master.csv not found; skipping collisions/overwrites/finals."
            )
        else:
            people_master_df = pd.read_csv(people_master_path)
            attendance_master_df = None
            if Path(attendance_master_path).exists():
                attendance_master_df = pd.read_csv(attendance_master_path)
            with st.spinner("Computing name collisions master…"):
                _, collisions_df = find_name_collisions(
                    people_master_df,
                    name_col="full_name_clean",
                )
                collisions_df.to_csv(name_collisions_master_path, index=False)
                # groups = distinct colliding names; rows = affected people
                collision_groups_master = (
                    collisions_df["full_name_clean"].nunique()
                    if not collisions_df.empty
                    and "full_name_clean" in collisions_df.columns
                    else 0
                )
                collision_rows_master = len(collisions_df)
            with st.spinner("Creating/updating overwrite file…"):
                # Create the Excel review file on first run; otherwise merge
                # newly found collisions into the existing one.
                if not people_overwrite_path.exists():
                    overwrite_df = create_people_overwrite_from_collisions(
                        collisions_df
                    )
                else:
                    overwrite_existing = pd.read_excel(
                        people_overwrite_path, engine="openpyxl"
                    ).fillna("")
                    overwrite_df = update_people_overwrite_with_new_collisions(
                        overwrite_existing,
                        collisions_df,
                        name_col="full_name_clean",
                        email_col="email_clean",
                    )
                overwrite_df.to_excel(
                    people_overwrite_path, index=False, engine="openpyxl"
                )
                # Show unreviewed count
                unreviewed_df = get_unreviewed_overwrite_rows(
                    overwrite_df, include_add=False
                )
                st.info(
                    f"Overwrite rows needing review (blank/invalid action): {len(unreviewed_df)}"
                )
                unreviewed_count = len(unreviewed_df)
                # reviewed rows = collision rows - unreviewed rows (ignore ADD rows)
                overwrite_reviewed_rows = max(0, len(overwrite_df) - unreviewed_count)
            # 2) Apply overwrites to build finals
            with st.spinner("Building people_final + attendance_final…"):
                people_final_df, people_info = apply_people_overwrites(
                    people_master_df,
                    overwrite_df,
                    email_col="email_clean",
                    require_approved=True,
                )
                people_final_df.to_csv(people_final_path, index=False)
                att_info = None
                if attendance_master_df is not None:
                    attendance_final_df, att_info = (
                        apply_attendance_removals_from_people_overwrite(
                            attendance_master_df,
                            overwrite_df,
                            email_col="email_clean",
                            require_approved=True,
                        )
                    )
                    attendance_final_df.to_csv(attendance_final_path, index=False)
                # Compute collisions AFTER overwrites
                _, collisions_final_df = find_name_collisions(
                    people_final_df,
                    name_col="full_name_clean",
                )
                name_collisions_final_path = outputs_dir / "name_collisions_final.csv"
                collisions_final_df.to_csv(name_collisions_final_path, index=False)
                collision_groups_final = (
                    collisions_final_df["full_name_clean"].nunique()
                    if not collisions_final_df.empty
                    and "full_name_clean" in collisions_final_df.columns
                    else 0
                )
                collision_rows_final = len(collisions_final_df)
            # Small summary
            st.success(
                "Saved: name_collisions_master, people_overwrite, people_final"
                + (", attendance_final" if att_info else "")
            )
            st.caption(
                f"People removed: {people_info['removed_rows']} | "
                f"People added: {people_info['added_rows']} | "
                f"People final rows: {people_info['final_rows']}"
            )
            if att_info:
                st.caption(
                    f"Attendance removed: {att_info['removed_rows']} | "
                    f"Attendance final rows: {att_info['final_rows']} | "
                    f"Collisions after overwrites (FINAL): {collision_groups_final} group(s), {collision_rows_final} row(s). | "
                    f"Unreviewed overwrite rows: {unreviewed_count}."
                )
        # Persist artifact paths/stats for the review tab (only when the
        # corresponding stage actually ran).
        if people_master_df is not None:
            st.session_state.last_run_meta.update(
                {
                    "name_collisions_master_path": str(name_collisions_master_path),
                    "people_overwrite_path": str(people_overwrite_path),
                    "people_final_path": str(people_final_path),
                    "attendance_final_path": str(attendance_final_path),
                }
            )
        if name_collisions_final_path is not None:
            st.session_state.last_run_meta.update(
                {
                    "name_collisions_final_path": str(name_collisions_final_path),
                    "global_collision_groups_final": int(collision_groups_final),
                    "global_collision_rows_final": int(collision_rows_final),
                    "overwrite_unreviewed_rows": int(unreviewed_count),
                    "global_collision_groups_master": int(collision_groups_master),
                    "global_collision_rows_master": int(collision_rows_master),
                    "overwrite_reviewed_rows": int(overwrite_reviewed_rows),
                }
            )
# ============================
# Render stored batch results (persists across reruns)
# ============================
# Rehydrate the last batch's artifacts from session_state so the tabs below
# keep working after Streamlit reruns the script.
batch_df = st.session_state.batch_df
success_runs = st.session_state.success_runs
stored_output_paths = (
    [Path(p) for p in st.session_state.output_paths]
    if st.session_state.output_paths
    else []
)
# -------------------------
# TAB 2: KPIs Dashboard
# -------------------------
with tab_kpis:
    st.subheader("Webinar KPIs (overall)")
    # Prefer FINAL (post-overwrite) tables when they exist.
    attendance_master_df, people_master_df, src = _load_masters(
        people_master_path, attendance_master_path, base_path=base_path
    )
    st.caption(
        f"Using: people={src['people_source']}, attendance={src['attendance_source']}"
    )
    if attendance_master_df is None or people_master_df is None:
        st.info("Masters not found yet. Run the pipeline first.")
    else:
        kpi_out_dir_path = base_path / "kpis"
        kpi_out_dir_path.mkdir(parents=True, exist_ok=True)
        webinar_kpis = generate_webinar_kpis(
            attendance=attendance_master_df,
            out_dir=kpi_out_dir_path,
            people_master=people_master_df,
        )
        st.caption(f"Saved: {kpi_out_dir_path / 'webinar_kpis.csv'}")
        st.dataframe(webinar_kpis, width="stretch")
        # Figures
        df_plot = prepare_webinar_kpis_for_plotting(webinar_kpis, window=4)
        style = get_default_plot_style()
        fig_counts, _ = make_attendance_counts_figure(df_plot, window=4, style=style)
        fig_rate, _ = make_engagement_rate_figure(df_plot, window=4, style=style)
        fig_audience, _ = plot_audience_participation_stacked(webinar_kpis, style=style)
        fig_comp, _ = plot_attendance_composition(webinar_kpis, style=style)
        fig_client_comp, _ = plot_client_composition_per_webinar(
            webinar_kpis, style=style
        )
        # Save figures (figures stay open — they are rendered below and
        # closed by render_and_close)
        plots_dir = kpi_out_dir_path / "plots"
        plots_dir.mkdir(exist_ok=True)
        save_fig_overwrite(fig_counts, plots_dir / "attendance_counts.png")
        save_fig_overwrite(fig_rate, plots_dir / "engagement_rate.png")
        save_fig_overwrite(fig_audience, plots_dir / "audience_participation.png")
        save_fig_overwrite(fig_comp, plots_dir / "attendance_composition.png")
        save_fig_overwrite(fig_client_comp, plots_dir / "client_composition.png")
        # BUGFIX: this caption previously claimed 'plots/webinar_kpis.csv'
        # was saved; the plots directory holds the PNGs saved above.
        st.caption(f"Saved plots to: {plots_dir}")
        st.subheader("Attendance over time")
        render_and_close(fig_counts)
        st.subheader("Engagement rate")
        render_and_close(fig_rate)
        st.subheader("Audience participation (Total audience split)")
        render_and_close(fig_audience)
        st.subheader("Attendee composition (First-time vs Repeat)")
        render_and_close(fig_comp)
        st.subheader("Client composition of attendees")
        render_and_close(fig_client_comp)
# -------------------------
# TAB 3: Center Reports
# -------------------------
with tab_reports:
    st.subheader("Center reports (Latest attended per person)")
    attendance_master_df, people_master_df, src = _load_masters(
        people_master_path, attendance_master_path, base_path=base_path
    )
    st.caption(
        f"Using: people={src['people_source']}, attendance={src['attendance_source']}"
    )
    if attendance_master_df is None or people_master_df is None:
        st.info("Masters not found yet. Run the pipeline first.")
    else:
        DATE_COL = "Webinar Date"
        if DATE_COL not in attendance_master_df.columns:
            st.error(f"attendance_master is missing '{DATE_COL}'.")
        else:
            _dates = _to_date_series(attendance_master_df[DATE_COL])
            available_dates = sorted({d for d in _dates.dropna().tolist()})
            if not available_dates:
                st.info("No webinar dates found in attendance_master.")
            else:
                # Restore the user's previous date selection, sanitized so
                # Streamlit never receives a value not present in options.
                default_dates = _safe_multiselect_defaults(
                    options=available_dates,
                    desired=st.session_state.get("center_report_dates"),
                    fallback="last",  # or "first"
                )
                picked_dates = st.multiselect(
                    "Select webinar date(s) to include",
                    options=available_dates,
                    default=default_dates,
                    key="center_report_date_picker",
                    help="We keep only attended=True rows, then keep the latest date per person.",
                )
                st.session_state.center_report_dates = picked_dates
                report_prefix = st.text_input(
                    "Output file prefix",
                    value="latest_attended_selected_dates",
                    key="center_report_prefix",
                )
                report_out_dir = base_path / "center_reports" / report_prefix
                report_out_dir.mkdir(parents=True, exist_ok=True)
                # CONSISTENCY FIX: use width="stretch" like the rest of the
                # app instead of the deprecated use_container_width=True.
                run_center_report_btn = st.button(
                    "Generate center report CSVs",
                    type="primary",
                    width="stretch",
                    key="center_report_generate_btn",
                )
                if run_center_report_btn:
                    if not picked_dates:
                        st.error("Pick at least one webinar date.")
                        st.stop()
                    with st.spinner("Building center reports..."):
                        result = build_latest_attended_center_reports(
                            attendance=attendance_master_df,
                            people=people_master_df,
                            include_dates=[str(d) for d in picked_dates],
                            output_dir=report_out_dir,
                            prefix=report_prefix,
                            attendance_key="email_clean",
                            attendance_date_col=DATE_COL,
                            attendance_attended_col="Attended",
                            final_center_col="Final Center",
                        )
                    st.success(f"Saved {len(result['paths'])} center report file(s).")
                    st.caption(f"Folder: {report_out_dir}")
                # ---- Preview from disk (no session_state needed) ----
                st.divider()
                st.markdown("### Preview saved center reports")
                # Find CSVs matching the prefix
                csvs = sorted(report_out_dir.glob(f"{report_prefix}_*.csv"))
                if not csvs:
                    st.info(
                        "No center report CSVs found yet. Generate the report first."
                    )
                else:
                    # Show center names by stripping prefix_
                    def _label(p: Path) -> str:
                        name = p.stem  # no .csv
                        if name.startswith(report_prefix + "_"):
                            return name[len(report_prefix) + 1 :]
                        return name

                    center_options = {_label(p): p for p in csvs}
                    picked_center = st.selectbox(
                        "Preview a center",
                        options=sorted(center_options.keys()),
                        key="center_report_center_pick",
                    )
                    preview_path = center_options[picked_center]
                    st.caption(f"File: {preview_path}")
                    preview_df = pd.read_csv(preview_path)
                    st.dataframe(preview_df, width="stretch", hide_index=True)
# -------------------------
# TAB 4: Maps
# -------------------------
with tab_maps:
    st.subheader("Center mapping (clients vs non-clients)")
    meta = st.session_state.last_run_meta
    attendance_master_df, people_master_df, src = _load_masters(
        people_master_path, attendance_master_path, base_path=base_path
    )
    st.caption(
        f"Using: people={src['people_source']}, attendance={src['attendance_source']}"
    )
    # Map generation needs the raw inputs recorded by the last pipeline run.
    required_keys = ["neoserra_path", "centers_path", "cache_path"]
    if any(k not in meta for k in required_keys):
        st.info("Run the pipeline first so inputs are available for post-processing.")
    elif people_master_df is None:
        st.info("People file not found yet. Run the pipeline first.")
    else:
        neoserra_path = Path(meta["neoserra_path"])
        centers_path = Path(meta["centers_path"])
        cache_path = Path(meta["cache_path"])
        if not neoserra_path.exists():
            st.info("NeoSerra file not found yet. Run the pipeline first.")
        elif not centers_path.exists():
            st.error(f"Centers file not found: {centers_path}")
        else:
            neoserra_raw_df = pd.read_csv(neoserra_path)
            centers_df = pd.read_csv(centers_path)
            zip_lookup_df = pd.read_csv(cache_path) if cache_path.exists() else None
            map_out_dir = base_path / "center_mapping"
            map_out_dir.mkdir(parents=True, exist_ok=True)
            out_nonclients_html = map_out_dir / "nonclients_zip_footprint.html"
            out_clients_html = map_out_dir / "clients_zip_footprint.html"
            # NEW
            out_run_clients_html = map_out_dir / "run_clients_zip_footprint.html"
            out_all_people_html = map_out_dir / "all_people_zip_footprint.html"
            # CONSISTENCY FIX: width="stretch" replaces the deprecated
            # use_container_width=True used elsewhere in this file.
            if st.button("Generate maps", type="primary", width="stretch"):
                with st.spinner("Generating non-client ZIP footprint map..."):
                    map_centers_for_nonclients(
                        people_master_df=people_master_df,
                        centers_df=centers_df,
                        zip_lookup_df=zip_lookup_df,
                        raw_zip_col="Zip/Postal Code",
                        out_html=out_nonclients_html,
                    )
                with st.spinner(
                    "Generating client ZIP footprint map (ALL NeoSerra clients)..."
                ):
                    map_centers_for_clients(
                        neoserra_df=neoserra_raw_df,
                        raw_zip_col="Physical Address ZIP Code",
                        out_html=out_clients_html,
                    )
                # clients that exist IN THIS RUN
                with st.spinner("Generating RUN client ZIP footprint map..."):
                    # Backfill ZIP lat/lon columns if an older master lacks them.
                    if not {"zip_clean", "zip_lat", "zip_lon"}.issubset(
                        people_master_df.columns
                    ):
                        people_master_df = add_zip_geography(
                            people_master_df,
                            raw_zip_col="Zip/Postal Code",
                            zip_col_out="zip_clean",
                        )
                    run_clients_df = people_master_df[
                        people_master_df["Client?"]
                    ].copy()
                    map_centers_for_run_clients(
                        people_df=run_clients_df,
                        out_html=str(out_run_clients_html),
                    )
                with st.spinner(
                    "Generating ALL attendees ZIP footprint (aggregated)..."
                ):
                    make_all_attendees_zip_map_single_colored(
                        people_df=people_master_df,
                        out_html=str(out_all_people_html),
                        zip_col="zip_clean",
                        zip_lat_col="zip_lat",
                        zip_lon_col="zip_lon",
                        dot_color="blue",
                    )
                st.success("Center maps generated and saved.")
            # -------------------------
            # Preview maps
            # -------------------------
            st.divider()
            show_prev = st.toggle("Preview latest maps", value=True)
            if show_prev:
                # Embed whichever map HTML files already exist on disk.
                if out_nonclients_html.exists():
                    st.markdown("### Non-clients ZIP footprint")
                    st.components.v1.html(
                        out_nonclients_html.read_text(encoding="utf-8"),
                        height=650,
                    )
                if out_clients_html.exists():
                    st.markdown("### All NeoSerra clients ZIP footprint")
                    st.components.v1.html(
                        out_clients_html.read_text(encoding="utf-8"),
                        height=650,
                    )
                if out_run_clients_html.exists():
                    st.markdown("### Clients in this run ZIP footprint")
                    st.components.v1.html(
                        out_run_clients_html.read_text(encoding="utf-8"),
                        height=650,
                    )
                if out_all_people_html.exists():
                    st.markdown("### All attendees ZIP footprint")
                    st.components.v1.html(
                        out_all_people_html.read_text(encoding="utf-8"),
                        height=650,
                    )
# -------------------------
# TAB 5: Batch + Review
# -------------------------
with tab_review:
st.subheader("Batch results + review")
# --- Global (FINAL) collision status + overwrite status ---
meta = st.session_state.last_run_meta or {}
m1, m2, m3, m4 = st.columns(4)
m1.metric(
"Collisions in MASTER (groups)", meta.get("global_collision_groups_master", "—")
)
m2.metric("Collisions reviewed (rows)", meta.get("overwrite_reviewed_rows", "—"))
m3.metric(
"Collisions in FINAL (groups)", meta.get("global_collision_groups_final", "—")
)
m4.metric("Unreviewed (rows)", meta.get("overwrite_unreviewed_rows", "—"))
if batch_df is None:
st.info("No batch results yet. Run the pipeline first.")
else:
ok_count = (
int((batch_df["status"] == "ok").sum())
if "status" in batch_df.columns
else 0
)
err_count = (