|
1 | | -"""Run a tiny A/B experiment comparing boundary penalties.""" |
| 1 | +"""Run a tiny A/B experiment comparing horn-filler uniqueness.""" |
2 | 2 |
|
3 | | -import math |
4 | | -from typing import Tuple |
5 | | - |
6 | | -try: # pragma: no cover - optional dependency |
7 | | - import torch |
8 | | - from torch import nn |
9 | | - from torch.utils.data import DataLoader, TensorDataset |
10 | | -except ModuleNotFoundError: # pragma: no cover - friendly exit when torch missing |
11 | | - print("PyTorch is required to run the boundary penalty demo. Please install torch.") |
12 | | - raise SystemExit(0) |
13 | | - |
14 | | -from simplicial_tensors.nn import BoundaryConfig, train_with_boundary |
15 | | - |
16 | | - |
17 | | -def make_synthetic_dataset(n: int = 1024, noise: float = 0.1) -> Tuple[TensorDataset, TensorDataset]: |
18 | | - torch.manual_seed(0) |
19 | | - x = torch.randn(n, 2) |
20 | | - y = torch.sign(x[:, 0] * x[:, 1]).unsqueeze(-1) |
21 | | - y[y == 0] = 1 |
22 | | - y = (y > 0).float() |
23 | | - |
24 | | - x += noise * torch.randn_like(x) |
25 | | - split = n // 5 |
26 | | - val_x, val_y = x[:split], y[:split] |
27 | | - train_x, train_y = x[split:], y[split:] |
28 | | - train_ds = TensorDataset(train_x, train_y) |
29 | | - val_ds = TensorDataset(val_x, val_y) |
30 | | - return train_ds, val_ds |
| 3 | +from __future__ import annotations |
31 | 4 |
|
| 5 | +from typing import Tuple |
32 | 6 |
|
33 | | -def make_model(seed: int = 0) -> nn.Module: |
34 | | - torch.manual_seed(seed) |
35 | | - return nn.Sequential( |
36 | | - nn.Linear(2, 32), |
37 | | - nn.ReLU(), |
38 | | - nn.Linear(32, 32), |
39 | | - nn.ReLU(), |
40 | | - nn.Linear(32, 1), |
41 | | - ) |
| 7 | +import numpy as np |
42 | 8 |
|
| 9 | +from simplicial_tensors.tensor_ops import ( |
| 10 | + filler, |
| 11 | + horn, |
| 12 | + n_hypergroupoid_conjecture, |
| 13 | + random_tensor, |
| 14 | +) |
43 | 15 |
|
def run_case(shape: Tuple[int, ...], missing_face: int, seed: int) -> None:
    """Test horn-filler uniqueness for one tensor shape and print the outcome.

    Args:
        shape: Shape of the random integer tensor to generate.
        missing_face: Index of the face omitted when forming the horn.
        seed: RNG seed, so each case is reproducible.
    """
    original = random_tensor(shape, low=-5, high=6, seed=seed)
    # Remove one face to form the horn, then ask the filler to rebuild it.
    faces = horn(original, missing_face)
    refilled = filler(faces, missing_face)
    # Conjecture's prediction vs. what actually happened for this tensor.
    predicted = n_hypergroupoid_conjecture(shape)
    unique = np.array_equal(original, refilled)
    print(
        f"shape={shape} omitted_face={missing_face} predicted_unique={predicted} "
        f"observed_unique={unique}"
    )
|
def main() -> None:
    """Run the demo over a pair of (shape, omitted face, seed) cases."""
    print("=== Horn filler A/B demo ===")
    cases = [
        ((3, 3), 1, 123),
        ((5, 5), 1, 456),
    ]
    for shape, face, seed in cases:
        run_case(shape=shape, missing_face=face, seed=seed)
75 | 34 |
|
76 | 35 |
|
77 | 36 | if __name__ == "__main__": |
|
0 commit comments