diff --git a/Cargo.lock b/Cargo.lock index bfe4d0a7..2bdd31e4 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -424,6 +424,7 @@ dependencies = [ name = "deps-lsp" version = "0.5.1" dependencies = [ + "async-trait", "criterion", "dashmap", "deps-cargo", diff --git a/README.md b/README.md index 0b687960..002573c6 100644 --- a/README.md +++ b/README.md @@ -14,11 +14,12 @@ A universal Language Server Protocol (LSP) server for dependency management acro - **Intelligent Autocomplete** — Package names, versions, and feature flags - **Version Hints** — Inlay hints showing latest available versions +- **Loading Indicators** — Visual feedback during registry fetches with LSP progress support - **Lock File Support** — Reads resolved versions from Cargo.lock, package-lock.json, poetry.lock, uv.lock, go.sum - **Diagnostics** — Warnings for outdated, unknown, or yanked dependencies - **Hover Information** — Package descriptions with resolved version from lock file - **Code Actions** — Quick fixes to update dependencies -- **High Performance** — Parallel fetching, optimized caching, minimal latency +- **High Performance** — Parallel fetching with per-dependency timeouts, optimized caching ![deps-lsp in action](https://raw.githubusercontent.com/bug-ops/deps-zed/main/assets/img.png) @@ -150,7 +151,15 @@ Configure via LSP initialization options: "yanked_severity": "warning" }, "cache": { - "refresh_interval_secs": 300 + "enabled": true, + "refresh_interval_secs": 300, + "fetch_timeout_secs": 5, + "max_concurrent_fetches": 20 + }, + "loading_indicator": { + "enabled": true, + "fallback_to_hints": true, + "loading_text": "⏳" }, "cold_start": { "enabled": true, @@ -159,6 +168,19 @@ Configure via LSP initialization options: } ``` +### Configuration Reference + +| Section | Option | Default | Description | +|---------|--------|---------|-------------| +| `cache` | `fetch_timeout_secs` | `5` | Per-package fetch timeout (1-300 seconds) | +| `cache` | `max_concurrent_fetches` | `20` | Concurrent 
registry requests (1-100) | +| `loading_indicator` | `enabled` | `true` | Show loading feedback during fetches | +| `loading_indicator` | `fallback_to_hints` | `true` | Show loading in inlay hints if LSP progress unsupported | +| `loading_indicator` | `loading_text` | `"⏳"` | Text shown during loading (max 100 chars) | + +> [!TIP] +> Increase `fetch_timeout_secs` for slower networks. The per-dependency timeout prevents slow packages from blocking others. + > [!NOTE] > Cold start support ensures LSP features work immediately when your IDE restores previously opened files. diff --git a/crates/deps-lsp/Cargo.toml b/crates/deps-lsp/Cargo.toml index 5774075e..12ae1238 100644 --- a/crates/deps-lsp/Cargo.toml +++ b/crates/deps-lsp/Cargo.toml @@ -47,6 +47,7 @@ tracing = { workspace = true } tracing-subscriber = { workspace = true, features = ["env-filter"] } [dev-dependencies] +async-trait = { workspace = true } criterion = { workspace = true } insta = { workspace = true, features = ["json"] } mockito = { workspace = true } diff --git a/crates/deps-lsp/src/config.rs b/crates/deps-lsp/src/config.rs index 3cfffd8b..9909548a 100644 --- a/crates/deps-lsp/src/config.rs +++ b/crates/deps-lsp/src/config.rs @@ -134,6 +134,8 @@ impl Default for DiagnosticsConfig { /// /// - `enabled`: `true` /// - `refresh_interval_secs`: `300` (5 minutes) +/// - `fetch_timeout_secs`: `5` (5 seconds per package) +/// - `max_concurrent_fetches`: `20` (20 concurrent requests) /// /// # Examples /// @@ -143,16 +145,30 @@ impl Default for DiagnosticsConfig { /// let config = CacheConfig { /// refresh_interval_secs: 600, // 10 minutes /// enabled: true, +/// fetch_timeout_secs: 5, +/// max_concurrent_fetches: 20, /// }; /// /// assert_eq!(config.refresh_interval_secs, 600); /// ``` -#[derive(Debug, Deserialize)] +#[derive(Debug, Clone, Deserialize)] pub struct CacheConfig { #[serde(default = "default_refresh_interval")] pub refresh_interval_secs: u64, #[serde(default = "default_true")] pub enabled: 
bool,
+    /// Timeout for fetching a single package's versions (default: 5 seconds)
+    #[serde(
+        default = "default_fetch_timeout_secs",
+        deserialize_with = "deserialize_fetch_timeout"
+    )]
+    pub fetch_timeout_secs: u64,
+    /// Maximum concurrent package fetches (default: 20)
+    #[serde(
+        default = "default_max_concurrent_fetches",
+        deserialize_with = "deserialize_max_concurrent"
+    )]
+    pub max_concurrent_fetches: usize,
 }
 
 impl Default for CacheConfig {
@@ -160,6 +176,8 @@
         Self {
             refresh_interval_secs: default_refresh_interval(),
             enabled: true,
+            fetch_timeout_secs: default_fetch_timeout_secs(),
+            max_concurrent_fetches: default_max_concurrent_fetches(),
         }
     }
 }
@@ -262,6 +280,62 @@
 const fn default_refresh_interval() -> u64 {
     300 // 5 minutes
 }
 
+const fn default_fetch_timeout_secs() -> u64 {
+    5
+}
+
+const fn default_max_concurrent_fetches() -> usize {
+    20
+}
+
+/// Minimum timeout (seconds) to prevent zero-timeout edge case
+const MIN_FETCH_TIMEOUT_SECS: u64 = 1;
+/// Maximum timeout (seconds) - 5 minutes is generous
+const MAX_FETCH_TIMEOUT_SECS: u64 = 300;
+
+/// Minimum concurrent fetches (must be at least 1)
+const MIN_CONCURRENT_FETCHES: usize = 1;
+/// Maximum concurrent fetches
+const MAX_CONCURRENT_FETCHES: usize = 100;
+
+/// Custom deserializer for fetch_timeout_secs that validates bounds
+fn deserialize_fetch_timeout<'de, D>(deserializer: D) -> Result<u64, D::Error>
+where
+    D: serde::Deserializer<'de>,
+{
+    let secs = u64::deserialize(deserializer)?;
+    let clamped = secs.clamp(MIN_FETCH_TIMEOUT_SECS, MAX_FETCH_TIMEOUT_SECS);
+    if clamped != secs {
+        tracing::warn!(
+            "fetch_timeout_secs {} clamped to {} (valid range: {}-{})",
+            secs,
+            clamped,
+            MIN_FETCH_TIMEOUT_SECS,
+            MAX_FETCH_TIMEOUT_SECS
+        );
+    }
+    Ok(clamped)
+}
+
+/// Custom deserializer for max_concurrent_fetches that validates bounds
+fn deserialize_max_concurrent<'de, D>(deserializer: D) -> Result<usize, D::Error>
+where
+    D: serde::Deserializer<'de>,
+{
+    let count =
usize::deserialize(deserializer)?; + let clamped = count.clamp(MIN_CONCURRENT_FETCHES, MAX_CONCURRENT_FETCHES); + if clamped != count { + tracing::warn!( + "max_concurrent_fetches {} clamped to {} (valid range: {}-{})", + count, + clamped, + MIN_CONCURRENT_FETCHES, + MAX_CONCURRENT_FETCHES + ); + } + Ok(clamped) +} + /// Configuration for cold start behavior. /// /// Controls how the server handles loading documents from disk when @@ -362,6 +436,31 @@ mod tests { assert!(!config.enabled); } + #[test] + fn test_cache_config_defaults() { + let config = CacheConfig::default(); + assert!(config.enabled); + assert_eq!(config.refresh_interval_secs, 300); + assert_eq!(config.fetch_timeout_secs, 5); + assert_eq!(config.max_concurrent_fetches, 20); + } + + #[test] + fn test_cache_config_with_timeout_and_concurrency() { + let json = r#"{ + "refresh_interval_secs": 600, + "enabled": true, + "fetch_timeout_secs": 10, + "max_concurrent_fetches": 50 + }"#; + + let config: CacheConfig = serde_json::from_str(json).unwrap(); + assert_eq!(config.refresh_interval_secs, 600); + assert!(config.enabled); + assert_eq!(config.fetch_timeout_secs, 10); + assert_eq!(config.max_concurrent_fetches, 50); + } + #[test] fn test_full_config_deserialization() { let json = r#"{ @@ -529,4 +628,52 @@ mod tests { let config: LoadingIndicatorConfig = serde_json::from_str(json).unwrap(); assert_eq!(config.loading_text, "⏳"); } + + #[test] + fn test_cache_config_fetch_timeout_clamped_min() { + let json = r#"{"fetch_timeout_secs": 0}"#; + let config: CacheConfig = serde_json::from_str(json).unwrap(); + assert_eq!(config.fetch_timeout_secs, 1, "Should clamp 0 to MIN"); + } + + #[test] + fn test_cache_config_fetch_timeout_clamped_max() { + let json = r#"{"fetch_timeout_secs": 999999}"#; + let config: CacheConfig = serde_json::from_str(json).unwrap(); + assert_eq!(config.fetch_timeout_secs, 300, "Should clamp to MAX"); + } + + #[test] + fn test_cache_config_fetch_timeout_valid_range() { + let json = 
r#"{"fetch_timeout_secs": 10}"#; + let config: CacheConfig = serde_json::from_str(json).unwrap(); + assert_eq!( + config.fetch_timeout_secs, 10, + "Valid value should not be clamped" + ); + } + + #[test] + fn test_cache_config_max_concurrent_clamped_min() { + let json = r#"{"max_concurrent_fetches": 0}"#; + let config: CacheConfig = serde_json::from_str(json).unwrap(); + assert_eq!(config.max_concurrent_fetches, 1, "Should clamp 0 to MIN"); + } + + #[test] + fn test_cache_config_max_concurrent_clamped_max() { + let json = r#"{"max_concurrent_fetches": 100000}"#; + let config: CacheConfig = serde_json::from_str(json).unwrap(); + assert_eq!(config.max_concurrent_fetches, 100, "Should clamp to MAX"); + } + + #[test] + fn test_cache_config_max_concurrent_valid_range() { + let json = r#"{"max_concurrent_fetches": 50}"#; + let config: CacheConfig = serde_json::from_str(json).unwrap(); + assert_eq!( + config.max_concurrent_fetches, 50, + "Valid value should not be clamped" + ); + } } diff --git a/crates/deps-lsp/src/document/lifecycle.rs b/crates/deps-lsp/src/document/lifecycle.rs index 28ab0aeb..e8a67e0d 100644 --- a/crates/deps-lsp/src/document/lifecycle.rs +++ b/crates/deps-lsp/src/document/lifecycle.rs @@ -23,45 +23,71 @@ use tower_lsp_server::ls_types::{MessageType, Uri}; /// Returns a HashMap mapping package names to their latest version strings. /// Packages that fail to fetch are omitted from the result. /// -/// This function executes all registry requests concurrently, reducing -/// total fetch time from O(N × network_latency) to O(max(network_latency)). +/// This function executes all registry requests concurrently with per-dependency +/// timeout isolation, preventing slow packages from blocking others. 
 ///
 /// # Arguments
 ///
 /// * `registry` - Package registry to fetch from
 /// * `package_names` - List of package names to fetch
 /// * `progress` - Optional progress tracker (will be updated after each fetch)
+/// * `timeout_secs` - Timeout for each individual package fetch (default: 5s)
+/// * `max_concurrent` - Maximum concurrent fetches (default: 20)
+///
+/// # Timeout Behavior
+///
+/// Each package fetch is wrapped in an individual timeout. If a package
+/// takes longer than `timeout_secs` to fetch, it fails fast with a warning
+/// and does NOT block other packages.
 ///
 /// # Examples
 ///
 /// With 50 dependencies and 100ms per request:
 /// - Sequential: 50 × 100ms = 5000ms
-/// - Parallel: max(100ms) ≈ 150ms
+/// - Parallel (no timeout): max(100ms) ≈ 150ms
+/// - Parallel (5s timeout, 1 slow package at 30s): max(5s) ≈ 5s
 async fn fetch_latest_versions_parallel(
     registry: Arc<dyn Registry>,
     package_names: Vec<String>,
     progress: Option<&RegistryProgress>,
+    timeout_secs: u64,
+    max_concurrent: usize,
 ) -> HashMap<String, String> {
     use futures::stream::{self, StreamExt};
+    use std::time::Duration;
 
     let total = package_names.len();
     let fetched = Arc::new(std::sync::atomic::AtomicUsize::new(0));
+    let timeout = Duration::from_secs(timeout_secs);
 
-    // Process fetches concurrently while reporting progress
+    // Process fetches concurrently with per-dependency timeout
     let results: Vec<_> = stream::iter(package_names)
         .map(|name| {
             let registry = Arc::clone(&registry);
             let fetched = Arc::clone(&fetched);
             async move {
-                let result = registry
-                    .get_versions(&name)
-                    .await
-                    .ok()
-                    .and_then(|versions| {
+                // Wrap each fetch in a timeout
+                let result = tokio::time::timeout(timeout, registry.get_versions(&name)).await;
+
+                let version = match result {
+                    Ok(Ok(versions)) => {
                         // Use shared utility for consistent behavior with diagnostics
                         deps_core::find_latest_stable(&versions)
-                            .map(|v| (name, v.version_string().to_string()))
-                    });
+                            .map(|v| (name.clone(), v.version_string().to_string()))
+                    }
+                    Ok(Err(e))
=> { + tracing::warn!(package = %name, error = %e, "Failed to fetch versions"); + None + } + Err(_) => { + tracing::warn!( + package = %name, + timeout_secs, + "Fetch timed out" + ); + None + } + }; // Increment counter and report progress let count = fetched.fetch_add(1, std::sync::atomic::Ordering::Relaxed) + 1; @@ -69,10 +95,10 @@ async fn fetch_latest_versions_parallel( progress.update(count, total).await; } - result + version } }) - .buffer_unordered(10) // Limit concurrent requests to avoid overwhelming the registry + .buffer_unordered(max_concurrent) .collect() .await; @@ -88,7 +114,7 @@ pub async fn handle_document_open( content: String, state: Arc, client: Client, - _config: Arc>, + config: Arc>, ) -> Result> { // Find appropriate ecosystem for this URI let ecosystem = match state.ecosystem_registry.get_for_uri(&uri) { @@ -120,6 +146,9 @@ pub async fn handle_document_open( state.update_document(uri.clone(), doc_state); + // Clone cache config before spawning background task + let cache_config = { config.read().await.cache.clone() }; + // Spawn background task to fetch versions let uri_clone = uri.clone(); let state_clone = Arc::clone(&state); @@ -169,8 +198,14 @@ pub async fn handle_document_open( // Fetch latest versions from registry in parallel (for update hints) let registry = ecosystem_clone.registry(); - let cached_versions = - fetch_latest_versions_parallel(registry, dep_names, progress.as_ref()).await; + let cached_versions = fetch_latest_versions_parallel( + registry, + dep_names, + progress.as_ref(), + cache_config.fetch_timeout_secs, + cache_config.max_concurrent_fetches, + ) + .await; let success = !cached_versions.is_empty(); @@ -216,7 +251,7 @@ pub async fn handle_document_change( content: String, state: Arc, client: Client, - _config: Arc>, + config: Arc>, ) -> Result> { // Find appropriate ecosystem for this URI let ecosystem = match state.ecosystem_registry.get_for_uri(&uri) { @@ -242,6 +277,9 @@ pub async fn handle_document_change( 
state.update_document(uri.clone(), doc_state); + // Clone cache config before spawning background task + let cache_config = { config.read().await.cache.clone() }; + // Spawn background task to update diagnostics let uri_clone = uri.clone(); let state_clone = Arc::clone(&state); @@ -294,8 +332,14 @@ pub async fn handle_document_change( // Fetch latest versions from registry in parallel (for update hints) let registry = ecosystem_clone.registry(); - let cached_versions = - fetch_latest_versions_parallel(registry, dep_names, progress.as_ref()).await; + let cached_versions = fetch_latest_versions_parallel( + registry, + dep_names, + progress.as_ref(), + cache_config.fetch_timeout_secs, + cache_config.max_concurrent_fetches, + ) + .await; let success = !cached_versions.is_empty(); @@ -538,6 +582,379 @@ mod tests { // This error would cause ensure_document_loaded to return false } + #[tokio::test] + async fn test_fetch_latest_versions_parallel_with_timeout() { + use async_trait::async_trait; + use deps_core::{Metadata, Registry, Version}; + use std::any::Any; + use std::time::Duration; + + // Mock registry that always times out + struct TimeoutRegistry; + + #[async_trait] + impl Registry for TimeoutRegistry { + async fn get_versions(&self, _name: &str) -> deps_core::Result>> { + // Sleep longer than timeout (5s default) + tokio::time::sleep(Duration::from_secs(10)).await; + Ok(vec![]) + } + + async fn get_latest_matching( + &self, + _name: &str, + _req: &str, + ) -> deps_core::Result>> { + Ok(None) + } + + async fn search( + &self, + _query: &str, + _limit: usize, + ) -> deps_core::Result>> { + Ok(vec![]) + } + + fn package_url(&self, name: &str) -> String { + format!("https://example.com/{}", name) + } + + fn as_any(&self) -> &dyn Any { + self + } + } + + let registry: Arc = Arc::new(TimeoutRegistry); + let packages = vec!["slow-package".to_string()]; + + // Use 1 second timeout for test speed + let result = fetch_latest_versions_parallel(registry, packages, None, 1, 
10).await; + + // Should return empty (timeout, not success) + assert!(result.is_empty(), "Slow package should timeout"); + } + + #[tokio::test] + async fn test_fetch_latest_versions_parallel_fast_packages_not_blocked() { + use async_trait::async_trait; + use deps_core::{Metadata, Registry, Version}; + use std::any::Any; + use std::time::Duration; + + // Mock registry with one slow, one fast package + struct MixedRegistry; + + #[async_trait] + impl Registry for MixedRegistry { + async fn get_versions(&self, name: &str) -> deps_core::Result>> { + if name == "slow-package" { + // Sleep longer than timeout + tokio::time::sleep(Duration::from_secs(10)).await; + } + // Fast package or unknown: return immediately + Ok(vec![]) + } + + async fn get_latest_matching( + &self, + _name: &str, + _req: &str, + ) -> deps_core::Result>> { + Ok(None) + } + + async fn search( + &self, + _query: &str, + _limit: usize, + ) -> deps_core::Result>> { + Ok(vec![]) + } + + fn package_url(&self, name: &str) -> String { + format!("https://example.com/{}", name) + } + + fn as_any(&self) -> &dyn Any { + self + } + } + + let registry: Arc = Arc::new(MixedRegistry); + let packages = vec!["slow-package".to_string(), "fast-package".to_string()]; + + let start = std::time::Instant::now(); + let result = fetch_latest_versions_parallel(registry, packages, None, 1, 10).await; + let elapsed = start.elapsed(); + + // Should complete in ~1s (timeout), not 10s (slow package duration) + assert!( + elapsed < Duration::from_secs(3), + "Should not wait for slow package: {:?}", + elapsed + ); + + // Fast package processed, slow package omitted + assert!( + result.is_empty(), + "No versions returned (test registry returns empty)" + ); + } + + #[tokio::test] + async fn test_fetch_latest_versions_parallel_concurrency_limit() { + use async_trait::async_trait; + use deps_core::{Metadata, Registry, Version}; + use std::any::Any; + use std::sync::atomic::{AtomicUsize, Ordering}; + use std::time::Duration; + + // Mock 
registry that tracks concurrent requests
+        struct ConcurrencyTrackingRegistry {
+            current: Arc<AtomicUsize>,
+            max_seen: Arc<AtomicUsize>,
+        }
+
+        #[async_trait]
+        impl Registry for ConcurrencyTrackingRegistry {
+            async fn get_versions(&self, _name: &str) -> deps_core::Result<Vec<Box<dyn Version>>> {
+                // Increment concurrent counter
+                let current = self.current.fetch_add(1, Ordering::SeqCst) + 1;
+
+                // Track max concurrent
+                self.max_seen.fetch_max(current, Ordering::SeqCst);
+
+                // Simulate work
+                tokio::time::sleep(Duration::from_millis(50)).await;
+
+                // Decrement counter
+                self.current.fetch_sub(1, Ordering::SeqCst);
+
+                Ok(vec![])
+            }
+
+            async fn get_latest_matching(
+                &self,
+                _name: &str,
+                _req: &str,
+            ) -> deps_core::Result<Option<Box<dyn Version>>> {
+                Ok(None)
+            }
+
+            async fn search(
+                &self,
+                _query: &str,
+                _limit: usize,
+            ) -> deps_core::Result<Vec<Box<dyn Metadata>>> {
+                Ok(vec![])
+            }
+
+            fn package_url(&self, name: &str) -> String {
+                format!("https://example.com/{}", name)
+            }
+
+            fn as_any(&self) -> &dyn Any {
+                self
+            }
+        }
+
+        let current = Arc::new(AtomicUsize::new(0));
+        let max_seen = Arc::new(AtomicUsize::new(0));
+
+        let registry: Arc<dyn Registry> = Arc::new(ConcurrencyTrackingRegistry {
+            current: Arc::clone(&current),
+            max_seen: Arc::clone(&max_seen),
+        });
+
+        // Create 50 packages, limit concurrency to 20
+        let packages: Vec<String> = (0..50).map(|i| format!("package-{}", i)).collect();
+
+        fetch_latest_versions_parallel(registry, packages, None, 5, 20).await;
+
+        // Max concurrent should not exceed limit (allow small margin for timing)
+        let max = max_seen.load(Ordering::SeqCst);
+        assert!(
+            max <= 22,
+            "Concurrency limit violated: {} concurrent requests (limit: 20)",
+            max
+        );
+    }
+
+    #[tokio::test]
+    async fn test_fetch_partial_success_with_mixed_outcomes() {
+        use async_trait::async_trait;
+        use deps_core::{Metadata, Registry, Version};
+        use std::any::Any;
+        use std::time::Duration;
+
+        // Mock version for successful fetches
+        #[derive(Debug)]
+        struct MockVersion {
+            version: String,
+        }
+
+        impl Version for MockVersion {
+            fn
version_string(&self) -> &str { + &self.version + } + + fn is_prerelease(&self) -> bool { + false + } + + fn is_yanked(&self) -> bool { + false + } + + fn as_any(&self) -> &dyn Any { + self + } + } + + // Mock registry with mixed outcomes: + // - "package-fast" returns quickly with version + // - "package-slow" times out + // - "package-error" returns error + struct MixedOutcomeRegistry; + + #[async_trait] + impl Registry for MixedOutcomeRegistry { + async fn get_versions(&self, name: &str) -> deps_core::Result>> { + match name { + "package-fast" => { + // Return immediately with a stable version + Ok(vec![Box::new(MockVersion { + version: "1.0.0".to_string(), + })]) + } + "package-slow" => { + // Sleep longer than timeout (test uses 1s timeout) + tokio::time::sleep(Duration::from_secs(10)).await; + Ok(vec![]) + } + "package-error" => { + // Return cache error (simpler for testing) + Err(deps_core::error::DepsError::CacheError( + "Mock registry error".to_string(), + )) + } + _ => Ok(vec![]), + } + } + + async fn get_latest_matching( + &self, + _name: &str, + _req: &str, + ) -> deps_core::Result>> { + Ok(None) + } + + async fn search( + &self, + _query: &str, + _limit: usize, + ) -> deps_core::Result>> { + Ok(vec![]) + } + + fn package_url(&self, name: &str) -> String { + format!("https://example.com/{}", name) + } + + fn as_any(&self) -> &dyn Any { + self + } + } + + let registry: Arc = Arc::new(MixedOutcomeRegistry); + let packages = vec![ + "package-fast".to_string(), + "package-slow".to_string(), + "package-error".to_string(), + ]; + + // Use 1 second timeout for test speed + let result = fetch_latest_versions_parallel(registry, packages, None, 1, 10).await; + + // Only the fast package should be in results + assert_eq!(result.len(), 1, "Should have exactly 1 successful package"); + assert_eq!( + result.get("package-fast"), + Some(&"1.0.0".to_string()), + "Fast package should have correct version" + ); + assert!( + !result.contains_key("package-slow"), + "Slow 
package should not be in results (timeout)" + ); + assert!( + !result.contains_key("package-error"), + "Error package should not be in results" + ); + } + + #[tokio::test] + async fn test_fetch_registry_error_handled() { + use async_trait::async_trait; + use deps_core::{Metadata, Registry, Version}; + use std::any::Any; + + // Mock registry that returns errors for all packages + struct ErrorRegistry; + + #[async_trait] + impl Registry for ErrorRegistry { + async fn get_versions(&self, name: &str) -> deps_core::Result>> { + Err(deps_core::error::DepsError::CacheError(format!( + "Failed to fetch package: {}", + name + ))) + } + + async fn get_latest_matching( + &self, + _name: &str, + _req: &str, + ) -> deps_core::Result>> { + Ok(None) + } + + async fn search( + &self, + _query: &str, + _limit: usize, + ) -> deps_core::Result>> { + Ok(vec![]) + } + + fn package_url(&self, name: &str) -> String { + format!("https://example.com/{}", name) + } + + fn as_any(&self) -> &dyn Any { + self + } + } + + let registry: Arc = Arc::new(ErrorRegistry); + let packages = vec![ + "package-1".to_string(), + "package-2".to_string(), + "package-3".to_string(), + ]; + + // Should not panic, just return empty result + let result = fetch_latest_versions_parallel(registry, packages, None, 5, 10).await; + + // All packages failed, result should be empty + assert!( + result.is_empty(), + "All packages with errors should be omitted from results" + ); + } + // Cargo-specific tests #[cfg(feature = "cargo")] mod cargo_tests { diff --git a/templates/README.md b/templates/README.md index c6e5b034..e93c8b87 100644 --- a/templates/README.md +++ b/templates/README.md @@ -21,7 +21,8 @@ This directory contains template files for creating new ecosystem support in dep 4. Implement the TODO sections in each file 5. Add your crate to the workspace in `Cargo.toml` -6. Register your ecosystem in `deps-lsp/src/document.rs` +6. Add feature flag in `deps-lsp/Cargo.toml` +7. 
Register your ecosystem in `deps-lsp/src/lib.rs` using `ecosystem!()` and `register!()` macros ## File Structure