|
39 | 39 | METRIC_REQUEST_TOPIC_TEMPLATE, |
40 | 40 | ACTIVE_DISCOVERY_TIMEOUT, |
41 | 41 | LEGACY_DISCOVERY_TIMEOUT, |
42 | | - MINIMUM_DISCOVERY_TOPICS, |
43 | | - GOOD_DISCOVERY_TOPICS, |
44 | | - EXCELLENT_DISCOVERY_TOPICS, |
| 42 | + MINIMUM_DISCOVERY_PERCENT, |
| 43 | + GOOD_DISCOVERY_PERCENT, |
| 44 | + GENERIC_VEHICLE_TYPE, |
| 45 | + GENERIC_VEHICLE_NAME, |
45 | 46 | LOGGER_NAME, |
46 | 47 | ERROR_CANNOT_CONNECT, |
47 | 48 | ERROR_TIMEOUT, |
48 | 49 | ERROR_UNKNOWN, |
49 | 50 | ) |
| 51 | +from ..metrics.vehicles import VEHICLE_TYPE_PREFIXES, VEHICLE_TYPE_NAMES |
50 | 52 |
|
51 | 53 | _LOGGER = logging.getLogger(LOGGER_NAME) |
52 | 54 |
|
53 | 55 |
|
def detect_vehicle_type(topics: Set[str]) -> tuple[str, str]:
    """Identify the vehicle type from a set of discovered MQTT topics.

    Scans the metric portion of each topic for a vehicle-specific prefix
    (e.g. xvu., xse., xmg., xnl., xrt.) registered in VEHICLE_TYPE_PREFIXES.
    The first matching topic wins; if nothing matches, the generic
    fallback is returned.

    Args:
        topics: Set of discovered MQTT topics

    Returns:
        Tuple of (vehicle_type_id, vehicle_type_name)
        e.g., ("vw_eup", "VW e-UP!") or ("generic", "Generic OVMS")
    """
    for candidate in topics:
        # Only metric topics carry the vehicle-specific prefixes.
        if "/metric/" not in candidate:
            continue
        # Everything after the final /metric/ is the metric path.
        path = candidate.split("/metric/")[-1]
        for known_prefix, type_id in VEHICLE_TYPE_PREFIXES.items():
            if not path.startswith(known_prefix):
                continue
            display_name = VEHICLE_TYPE_NAMES.get(type_id, type_id)
            _LOGGER.debug(
                "Detected vehicle type '%s' (%s) from topic: %s",
                type_id,
                display_name,
                candidate,
            )
            return type_id, display_name

    return GENERIC_VEHICLE_TYPE, GENERIC_VEHICLE_NAME
| 86 | + |
| 87 | + |
def get_expected_metric_count(vehicle_type: str) -> int:
    """Get the expected number of metrics for a vehicle type.

    Calculates expected metrics by counting defined metrics from the metrics
    module, so the number automatically tracks the actual metric definitions.

    Args:
        vehicle_type: Vehicle type identifier (e.g., "vw_eup", "generic")

    Returns:
        Expected number of metrics for this vehicle type
    """
    # Import here to avoid circular imports
    # pylint: disable=import-outside-toplevel
    from ..metrics import METRIC_DEFINITIONS

    # Categories that apply to all vehicles. A frozenset gives O(1)
    # membership tests inside the counting loop (a list would be O(n) each).
    common_categories = frozenset((
        "battery",
        "charging",
        "climate",
        "door",
        "location",
        "motor",
        "trip",
        "device",
        "diagnostic",
        "power",
        "network",
        "system",
        "tire",
    ))

    # Single pass over the definitions: tally common and vehicle-specific
    # metrics together instead of scanning METRIC_DEFINITIONS twice.
    # Using elif also guarantees a metric is never counted in both buckets.
    common_count = 0
    vehicle_count = 0
    for definition in METRIC_DEFINITIONS.values():
        category = definition.get("category")
        if category in common_categories:
            common_count += 1
        elif category == vehicle_type:
            vehicle_count += 1

    if vehicle_type != "generic":
        total = common_count + vehicle_count
        _LOGGER.debug(
            "Expected metrics for %s: %d common + %d vehicle-specific = %d total",
            vehicle_type,
            common_count,
            vehicle_count,
            total,
        )
        return total

    _LOGGER.debug("Expected metrics for generic vehicle: %d common", common_count)
    return common_count
| 143 | + |
| 144 | + |
| 145 | +def calculate_discovery_percentage( |
| 146 | + metric_count: int, expected_count: int |
| 147 | +) -> tuple[int, str]: |
| 148 | + """Calculate discovery percentage and quality indicator. |
| 149 | +
|
| 150 | + Args: |
| 151 | + metric_count: Number of metrics actually discovered |
| 152 | + expected_count: Expected number of metrics for this vehicle type |
| 153 | +
|
| 154 | + Returns: |
| 155 | + Tuple of (percentage, quality_indicator) |
| 156 | + Quality indicator is emoji: ✅ (>=70%), ⚠️ (>=30%), ❌ (<30%) |
| 157 | + """ |
| 158 | + if expected_count <= 0: |
| 159 | + return 0, "❌" |
| 160 | + |
| 161 | + percentage = min(100, int((metric_count / expected_count) * 100)) |
| 162 | + |
| 163 | + if percentage >= 70: |
| 164 | + quality = "✅" |
| 165 | + elif percentage >= 30: |
| 166 | + quality = "⚠️" |
| 167 | + else: |
| 168 | + quality = "❌" |
| 169 | + |
| 170 | + return percentage, quality |
| 171 | + |
| 172 | + |
54 | 173 | def format_structure_prefix(config): |
55 | 174 | """Format the topic structure prefix based on configuration.""" |
56 | 175 | try: |
@@ -380,13 +499,25 @@ def count_metric_topics(topics): |
380 | 499 | # Count only metric topics, not /client/ echoes (Issue 1 fix) |
381 | 500 | metric_topics_before = count_metric_topics(discovered_topics) |
382 | 501 |
|
383 | | - # Check if retained messages already gave us enough metrics |
| 502 | + # Get expected metrics for percentage calculation |
| 503 | + # We detect vehicle type early to calculate thresholds |
| 504 | + vehicle_type_early, _ = detect_vehicle_type(discovered_topics) |
| 505 | + expected_count_early = get_expected_metric_count(vehicle_type_early) |
| 506 | + retained_percentage = ( |
| 507 | + int((metric_topics_before / expected_count_early) * 100) |
| 508 | + if expected_count_early > 0 |
| 509 | + else 0 |
| 510 | + ) |
| 511 | + |
| 512 | + # Check if retained messages already gave us enough metrics (percentage-based) |
384 | 513 | # This happens when broker has retained messages from a running OVMS module |
385 | | - if metric_topics_before >= GOOD_DISCOVERY_TOPICS: |
| 514 | + if retained_percentage >= GOOD_DISCOVERY_PERCENT: |
386 | 515 | _LOGGER.info( |
387 | | - "%s - Already received %d metric topics from retained messages, skipping active discovery", |
| 516 | + "%s - Already received %d metric topics (%d%%) from retained messages, " |
| 517 | + "skipping active discovery", |
388 | 518 | log_prefix, |
389 | 519 | metric_topics_before, |
| 520 | + retained_percentage, |
390 | 521 | ) |
391 | 522 | active_discovery_succeeded = True |
392 | 523 | debug_info["discovery_method"] = "retained" |
@@ -501,59 +632,65 @@ def count_metric_topics(topics): |
501 | 632 | except Exception as ex: # pylint: disable=broad-except |
502 | 633 | _LOGGER.debug("%s - Error disconnecting: %s", log_prefix, ex) |
503 | 634 |
|
504 | | - # Return the results with quality assessment (Issue 2 & 3 fix) |
| 635 | + # Process discovery results |
505 | 636 | topics_count = len(discovered_topics) |
506 | 637 | metric_topics = [t for t in discovered_topics if "/metric/" in t] |
507 | 638 | metric_count = len(metric_topics) |
508 | 639 |
|
509 | | - # Determine discovery quality for user feedback |
510 | | - if metric_count >= EXCELLENT_DISCOVERY_TOPICS: |
511 | | - discovery_quality = "excellent" |
512 | | - elif metric_count >= GOOD_DISCOVERY_TOPICS: |
513 | | - discovery_quality = "good" |
514 | | - elif metric_count >= MINIMUM_DISCOVERY_TOPICS: |
515 | | - discovery_quality = "partial" |
516 | | - elif metric_count > 0: |
517 | | - discovery_quality = "minimal" |
518 | | - else: |
519 | | - discovery_quality = "none" |
| 640 | + # Detect vehicle type and calculate expected metrics |
| 641 | + vehicle_type, vehicle_name = detect_vehicle_type(discovered_topics) |
| 642 | + expected_count = get_expected_metric_count(vehicle_type) |
| 643 | + discovery_percentage, quality_indicator = calculate_discovery_percentage( |
| 644 | + metric_count, expected_count |
| 645 | + ) |
520 | 646 |
|
521 | 647 | debug_info["topics_count"] = topics_count |
522 | 648 | debug_info["metric_count"] = metric_count |
523 | | - debug_info["discovery_quality"] = discovery_quality |
| 649 | + debug_info["vehicle_type"] = vehicle_type |
| 650 | + debug_info["vehicle_name"] = vehicle_name |
| 651 | + debug_info["expected_count"] = expected_count |
| 652 | + debug_info["discovery_percentage"] = discovery_percentage |
524 | 653 | debug_info["discovered_topics"] = ( |
525 | 654 | list(discovered_topics) |
526 | 655 | if len(discovered_topics) < 50 |
527 | 656 | else list(discovered_topics)[:50] |
528 | 657 | ) |
529 | 658 |
|
530 | 659 | _LOGGER.debug( |
531 | | - "%s - Discovery complete. Found %d topics (%d metric topics, quality: %s): %s", |
| 660 | + "%s - Discovery complete. Found %d topics (%d metric topics). " |
| 661 | + "Vehicle: %s. Coverage: %d%% (%d/%d expected)", |
532 | 662 | log_prefix, |
533 | 663 | topics_count, |
534 | 664 | metric_count, |
535 | | - discovery_quality, |
536 | | - list(metric_topics)[:10], |
| 665 | + vehicle_name, |
| 666 | + discovery_percentage, |
| 667 | + metric_count, |
| 668 | + expected_count, |
537 | 669 | ) |
538 | 670 |
|
539 | | - # Add warning if few metric topics found |
| 671 | + # Build result |
540 | 672 | result = { |
541 | 673 | "success": True, |
542 | 674 | "discovered_topics": discovered_topics, |
543 | 675 | "topic_count": topics_count, |
544 | 676 | "metric_count": metric_count, |
545 | | - "discovery_quality": discovery_quality, |
| 677 | + "vehicle_type": vehicle_type, |
| 678 | + "vehicle_name": vehicle_name, |
| 679 | + "expected_count": expected_count, |
| 680 | + "discovery_percentage": discovery_percentage, |
| 681 | + "quality_indicator": quality_indicator, |
546 | 682 | "debug_info": debug_info, |
547 | 683 | } |
548 | 684 |
|
549 | | - if metric_count < MINIMUM_DISCOVERY_TOPICS: |
| 685 | + if discovery_percentage < MINIMUM_DISCOVERY_PERCENT: |
550 | 686 | result["warning"] = "few_topics" |
551 | 687 | _LOGGER.warning( |
552 | | - "%s - Only %d metric topics found (minimum recommended: %d). " |
| 688 | + "%s - Only %d metric topics found (%d%%, minimum recommended: %d%%). " |
553 | 689 | "Check that your OVMS module is online and publishing metrics.", |
554 | 690 | log_prefix, |
555 | 691 | metric_count, |
556 | | - MINIMUM_DISCOVERY_TOPICS, |
| 692 | + discovery_percentage, |
| 693 | + MINIMUM_DISCOVERY_PERCENT, |
557 | 694 | ) |
558 | 695 |
|
559 | 696 | return result |
|
0 commit comments