     apply_similarity_transform,
     SimilarityTransform,
 )
+from caliscope.core.scale_accuracy import (
+    compute_scale_accuracy as compute_scale_accuracy_impl,
+    ScaleAccuracyData,
+)

 import pandas as pd

@@ -512,6 +516,65 @@ def filter_by_percentile_error( |

         return self._filter_by_reprojection_thresholds(thresholds, min_per_camera)

+    def compute_scale_accuracy(self, sync_index: int) -> ScaleAccuracyData:
+        """Compute scale accuracy by comparing triangulated points to known object geometry.
+
+        Compares triangulated world points at the given sync_index to their
+        corresponding ground-truth object positions (from the obj_loc columns)
+        to assess reconstruction accuracy. Uses all pairwise distances between
+        detected corners for a robust statistical measurement.
+
+        Args:
+            sync_index: Frame index at which to compute accuracy.
+
+        Returns:
+            ScaleAccuracyData with distance RMSE and relative error.
+
+        Raises:
+            ValueError: If fewer than 2 matched points are available at sync_index.
+        """
+        # Extract data at sync_index
+        img_df = self.image_points.df
+        world_df = self.world_points.df
+
+        img_subset = img_df[img_df["sync_index"] == sync_index]
+        world_subset = world_df[world_df["sync_index"] == sync_index]
+
+        if img_subset.empty:
+            raise ValueError(f"No image observations at sync_index {sync_index}")
+        if world_subset.empty:
+            raise ValueError(f"No world points at sync_index {sync_index}")
+
+        # Get the object locations observed at this sync_index. Drop duplicates
+        # on point_id since multiple cameras may observe the same point.
+        obj_points_df = img_subset[["point_id", "obj_loc_x", "obj_loc_y", "obj_loc_z"]].drop_duplicates(
+            subset=["point_id"]
+        )
+
+        # Merge world points with object locations by point_id
+        merged = world_subset.merge(obj_points_df, on="point_id", how="inner")
+
+        if len(merged) < 2:
+            raise ValueError(f"Insufficient matched points for scale accuracy: {len(merged)} (need at least 2)")
+
+        # Handle planar objects: if z is entirely NaN, treat the object as lying in the z=0 plane
+        if merged["obj_loc_z"].isna().all():
+            merged = merged.copy()
+            merged["obj_loc_z"] = 0.0
+
+        # Filter out any rows that still contain NaN object coordinates
+        valid_mask = ~merged[["obj_loc_x", "obj_loc_y", "obj_loc_z"]].isna().any(axis=1)
+        merged = merged[valid_mask]
+
+        if len(merged) < 2:
+            raise ValueError("Insufficient valid points after NaN filtering (need at least 2)")
+
+        # Extract arrays for the scale accuracy computation
+        world_points = merged[["x_coord", "y_coord", "z_coord"]].to_numpy()
+        object_points = merged[["obj_loc_x", "obj_loc_y", "obj_loc_z"]].to_numpy()
+
+        return compute_scale_accuracy_impl(world_points, object_points, sync_index)
+
     def align_to_object(self, sync_index: int) -> "PointDataBundle":
         """
         Align the bundle to real-world units using object point correspondences.
|
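The `compute_scale_accuracy_impl` helper imported at the top of this diff lives in `caliscope.core.scale_accuracy`, which is not shown here. As a rough sketch of the pairwise-distance comparison the docstring describes (the `ScaleAccuracyData` field names below are assumptions for illustration, not necessarily the module's actual API), it might look like:

```python
# A minimal sketch, assuming ScaleAccuracyData carries the sync_index,
# a distance RMSE, and a relative error. Field names are hypothetical.
from dataclasses import dataclass

import numpy as np


@dataclass
class ScaleAccuracyData:
    sync_index: int
    distance_rmse: float   # RMSE of pairwise-distance errors, in object units
    relative_error: float  # distance_rmse relative to the mean true distance


def compute_scale_accuracy(
    world_points: np.ndarray,   # (n, 3) triangulated points
    object_points: np.ndarray,  # (n, 3) ground-truth object geometry
    sync_index: int,
) -> ScaleAccuracyData:
    # All pairwise distances within each point set (upper triangle, i < j)
    i, j = np.triu_indices(len(world_points), k=1)
    d_world = np.linalg.norm(world_points[i] - world_points[j], axis=1)
    d_object = np.linalg.norm(object_points[i] - object_points[j], axis=1)

    # RMSE of the distance errors, and its size relative to the true scale
    rmse = float(np.sqrt(np.mean((d_world - d_object) ** 2)))
    relative = rmse / float(np.mean(d_object))
    return ScaleAccuracyData(sync_index, rmse, relative)
```

On a bundle with loaded image and world points, the new method would then be invoked as something like `accuracy = bundle.compute_scale_accuracy(sync_index=0)` (hypothetical usage).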