deps_lsp/document/lifecycle.rs

1//! New simplified document lifecycle using ecosystem registry.
2//!
3//! This module provides unified open/change/close handlers that work with
4//! the ecosystem trait architecture, eliminating per-ecosystem duplication.
5
6use super::loader::load_document_from_disk;
7use super::state::{DocumentState, ServerState};
8use crate::config::DepsConfig;
9use crate::handlers::diagnostics;
10use crate::progress::{ProgressSender, RegistryProgress};
11use deps_core::Ecosystem;
12use deps_core::Registry;
13use deps_core::Result;
14use std::collections::{HashMap, HashSet};
15use std::sync::Arc;
16use tokio::sync::RwLock;
17use tokio::task::JoinHandle;
18use tower_lsp_server::Client;
19use tower_lsp_server::ls_types::{MessageType, Uri};
20
21/// Preserves cached version data from old document state to new state.
22/// Called during document updates to avoid re-fetching versions for unchanged deps.
23fn preserve_cache(new_state: &mut DocumentState, old_state: &DocumentState) {
24    tracing::trace!(
25        cached = old_state.cached_versions.len(),
26        resolved = old_state.resolved_versions.len(),
27        "preserving version cache"
28    );
29    new_state
30        .cached_versions
31        .clone_from(&old_state.cached_versions);
32    new_state
33        .resolved_versions
34        .clone_from(&old_state.resolved_versions);
35}
36
/// Diff between old and new dependency sets.
///
/// Computed on each document change so the registry is only queried for
/// dependencies the edit actually added.
#[derive(Debug, Clone, Default)]
struct DependencyDiff {
    /// Dependencies present in the new set but not the old (need a fetch).
    added: Vec<String>,
    /// Dependencies present in the old set but not the new; the change
    /// handler prunes their cache entries.
    // Note: the previous `#[allow(dead_code)]` here was stale — `removed`
    // is read by the change handler (diff logging and cache pruning).
    removed: Vec<String>,
}

impl DependencyDiff {
    /// Computes the set difference in both directions.
    ///
    /// Element order inside `added`/`removed` follows `HashSet` iteration
    /// order and is therefore unspecified.
    fn compute(old_deps: &HashSet<String>, new_deps: &HashSet<String>) -> Self {
        Self {
            added: new_deps.difference(old_deps).cloned().collect(),
            removed: old_deps.difference(new_deps).cloned().collect(),
        }
    }

    /// True when at least one dependency was added, i.e. a registry fetch
    /// is required.
    #[cfg(test)]
    fn needs_fetch(&self) -> bool {
        !self.added.is_empty()
    }
}
58
/// Result of parallel version fetching.
struct FetchResult {
    /// Successfully fetched versions (package -> latest version)
    versions: HashMap<String, String>,
    /// Number of packages that failed to fetch (timeout or error).
    /// Lookups that succeed but find no version are NOT counted here.
    failed_count: usize,
    /// First actionable error message (shown to user via `window/showMessage`)
    first_error: Option<String>,
}
68
69/// Fetches latest versions for multiple packages in parallel with progress reporting.
70///
71/// Returns a [`FetchResult`] containing successfully fetched versions and failure count.
72/// Packages that fail to fetch are omitted from the versions map.
73///
74/// This function executes all registry requests concurrently with per-dependency
75/// timeout isolation, preventing slow packages from blocking others.
76///
77/// # Arguments
78///
79/// * `registry` - Package registry to fetch from
80/// * `package_names` - List of package names to fetch
81/// * `progress` - Optional progress tracker (will be updated after each fetch)
82/// * `timeout_secs` - Timeout for each individual package fetch (default: 10s)
83/// * `max_concurrent` - Maximum concurrent fetches (default: 20)
84///
85/// # Timeout Behavior
86///
87/// Each package fetch is wrapped in an individual timeout. If a package
88/// takes longer than `timeout_secs` to fetch, it fails fast with a warning
89/// and does NOT block other packages.
90///
91/// # Performance
92///
93/// With 50 dependencies and 100ms per request:
94/// - Sequential: 50 × 100ms = 5000ms
95/// - Parallel (no timeout): max(100ms) ≈ 150ms
96/// - Parallel (10s timeout, 1 slow package at 30s): max(10s) ≈ 10s
97async fn fetch_latest_versions_parallel(
98    registry: Arc<dyn Registry>,
99    package_names: Vec<String>,
100    progress_sender: Option<ProgressSender>,
101    timeout_secs: u64,
102    max_concurrent: usize,
103) -> FetchResult {
104    use futures::stream::{self, StreamExt};
105    use std::time::Duration;
106
107    let fetched = Arc::new(std::sync::atomic::AtomicUsize::new(0));
108    let failed = Arc::new(std::sync::atomic::AtomicUsize::new(0));
109    let first_error: Arc<std::sync::Mutex<Option<String>>> = Arc::new(std::sync::Mutex::new(None));
110    let timeout = Duration::from_secs(timeout_secs);
111
112    let results: Vec<_> = stream::iter(package_names)
113        .map(|name| {
114            let registry = Arc::clone(&registry);
115            let fetched = Arc::clone(&fetched);
116            let failed = Arc::clone(&failed);
117            let first_error = Arc::clone(&first_error);
118            let progress_sender = progress_sender.clone();
119            async move {
120                let result =
121                    tokio::time::timeout(timeout, registry.get_latest_matching(&name, "*")).await;
122
123                let version = match result {
124                    Ok(Ok(Some(v))) => {
125                        tracing::debug!(package = %name, version = %v.version_string(), "fetched");
126                        Some((name.clone(), v.version_string().to_string()))
127                    }
128                    Ok(Ok(None)) => {
129                        tracing::debug!(package = %name, "no version found");
130                        None
131                    }
132                    Ok(Err(e)) => {
133                        tracing::warn!(package = %name, error = %e, "fetch failed");
134                        failed.fetch_add(1, std::sync::atomic::Ordering::Relaxed);
135                        let mut fe = first_error.lock().unwrap_or_else(|p| p.into_inner());
136                        if fe.is_none() {
137                            *fe = Some(e.to_string());
138                        }
139                        None
140                    }
141                    Err(_) => {
142                        tracing::warn!(package = %name, "fetch timed out ({}s)", timeout.as_secs());
143                        failed.fetch_add(1, std::sync::atomic::Ordering::Relaxed);
144                        None
145                    }
146                };
147
148                let count = fetched.fetch_add(1, std::sync::atomic::Ordering::Relaxed) + 1;
149                if let Some(ref sender) = progress_sender {
150                    sender.send(count);
151                }
152
153                version
154            }
155        })
156        .buffer_unordered(max_concurrent)
157        .collect()
158        .await;
159
160    FetchResult {
161        versions: results.into_iter().flatten().collect(),
162        failed_count: failed.load(std::sync::atomic::Ordering::Relaxed),
163        first_error: first_error.lock().unwrap_or_else(|p| p.into_inner()).take(),
164    }
165}
166
167/// Generic document open handler using ecosystem registry.
168///
169/// Parses manifest using the ecosystem's parser, creates document state,
170/// and spawns a background task to fetch version information from the registry.
171pub async fn handle_document_open(
172    uri: Uri,
173    content: String,
174    state: Arc<ServerState>,
175    client: Client,
176    config: Arc<RwLock<DepsConfig>>,
177) -> Result<JoinHandle<()>> {
178    // Find appropriate ecosystem for this URI
179    let ecosystem = match state.ecosystem_registry.get_for_uri(&uri) {
180        Some(e) => e,
181        None => {
182            tracing::debug!("No ecosystem handler for {:?}", uri);
183            return Err(deps_core::error::DepsError::UnsupportedEcosystem(format!(
184                "{uri:?}"
185            )));
186        }
187    };
188
189    tracing::info!(
190        "Opening {:?} with ecosystem: {}",
191        uri,
192        ecosystem.display_name()
193    );
194
195    // Try to parse manifest (may fail for incomplete syntax)
196    let parse_result = ecosystem.parse_manifest(&content, &uri).await.ok();
197
198    // Create document state (parse_result may be None)
199    let doc_state = if let Some(pr) = parse_result {
200        DocumentState::new_from_parse_result(ecosystem.id(), content, pr)
201    } else {
202        tracing::debug!("Failed to parse manifest, storing document without parse result");
203        DocumentState::new_without_parse_result(ecosystem.id(), content)
204    };
205
206    state.update_document(uri.clone(), doc_state);
207
208    // Clone cache config before spawning background task
209    let cache_config = { config.read().await.cache.clone() };
210
211    // Spawn background task to fetch versions
212    let uri_clone = uri.clone();
213    let state_clone = Arc::clone(&state);
214    let ecosystem_clone = Arc::clone(&ecosystem);
215    let client_clone = client.clone();
216
217    let task = tokio::spawn(async move {
218        tracing::debug!("background task started");
219
220        // Load resolved versions from lock file first (instant, no network)
221        let resolved_versions =
222            load_resolved_versions(&uri_clone, &state_clone, ecosystem_clone.as_ref()).await;
223
224        // Update document state with resolved versions immediately
225        if !resolved_versions.is_empty()
226            && let Some(mut doc) = state_clone.documents.get_mut(&uri_clone)
227        {
228            doc.update_resolved_versions(resolved_versions.clone());
229            // Use resolved versions as cached versions for instant display
230            doc.update_cached_versions(resolved_versions.clone());
231        }
232
233        // Collect dependency names while holding reference (can't hold across await)
234        let dep_names: Vec<String> = {
235            let doc = match state_clone.get_document(&uri_clone) {
236                Some(d) => d,
237                None => {
238                    tracing::warn!("document not found, aborting fetch");
239                    return;
240                }
241            };
242            let parse_result = match doc.parse_result() {
243                Some(p) => p,
244                None => {
245                    tracing::warn!("no parse result, aborting fetch");
246                    return;
247                }
248            };
249            parse_result
250                .dependencies()
251                .into_iter()
252                .map(|d| d.name().to_string())
253                .collect()
254        };
255
256        tracing::debug!(count = dep_names.len(), "starting registry fetch");
257
258        // Mark as loading and start progress
259        if let Some(mut doc) = state_clone.documents.get_mut(&uri_clone) {
260            doc.set_loading();
261        }
262
263        let (progress, progress_sender) = match tokio::time::timeout(
264            std::time::Duration::from_secs(2),
265            RegistryProgress::start(client_clone.clone(), uri_clone.as_str(), dep_names.len()),
266        )
267        .await
268        {
269            Ok(Ok((p, s))) => (Some(p), Some(s)),
270            _ => (None, None),
271        };
272
273        tracing::debug!("progress started, fetching versions");
274
275        // Fetch latest versions from registry in parallel (for update hints)
276        let registry = ecosystem_clone.registry();
277        let fetch_result = fetch_latest_versions_parallel(
278            registry,
279            dep_names,
280            progress_sender,
281            cache_config.fetch_timeout_secs,
282            cache_config.max_concurrent_fetches,
283        )
284        .await;
285
286        let success = !fetch_result.versions.is_empty();
287        tracing::debug!(
288            fetched = fetch_result.versions.len(),
289            failed = fetch_result.failed_count,
290            "registry fetch complete"
291        );
292
293        // Update document state with cached versions (latest from registry)
294        if let Some(mut doc) = state_clone.documents.get_mut(&uri_clone) {
295            doc.update_cached_versions(fetch_result.versions);
296            if success {
297                doc.set_loaded();
298            } else {
299                doc.set_failed();
300            }
301        }
302
303        // End progress
304        if let Some(progress) = progress {
305            progress.end(success).await;
306        }
307
308        // Notify user about failed packages
309        if fetch_result.failed_count > 0 {
310            let message = if let Some(err) = &fetch_result.first_error {
311                format!("deps-lsp: {err}")
312            } else {
313                format!(
314                    "deps-lsp: {} package(s) failed to fetch (timeout or network error)",
315                    fetch_result.failed_count
316                )
317            };
318            client_clone
319                .show_message(MessageType::WARNING, message)
320                .await;
321        }
322
323        // Refresh inlay hints IMMEDIATELY after loading completes
324        // (before diagnostics which may take longer due to additional network calls)
325        if let Err(e) = client_clone.inlay_hint_refresh().await {
326            tracing::debug!("inlay_hint_refresh not supported: {:?}", e);
327        }
328
329        // Publish diagnostics (may be slower, runs after hints are already visible)
330        let diags =
331            diagnostics::generate_diagnostics_internal(Arc::clone(&state_clone), &uri_clone).await;
332
333        client_clone
334            .publish_diagnostics(uri_clone.clone(), diags, None)
335            .await;
336    });
337
338    Ok(task)
339}
340
/// Generic document change handler using ecosystem registry.
///
/// Re-parses manifest when document content changes and spawns a debounced
/// task to update diagnostics and request inlay hint refresh.
///
/// Only dependencies *added* by this change are fetched from the registry;
/// versions already cached for unchanged dependencies are carried over via
/// [`preserve_cache`], and cache entries for removed dependencies are pruned.
///
/// # Errors
///
/// Returns [`deps_core::error::DepsError::UnsupportedEcosystem`] when no
/// registered ecosystem claims the given URI.
pub async fn handle_document_change(
    uri: Uri,
    content: String,
    state: Arc<ServerState>,
    client: Client,
    config: Arc<RwLock<DepsConfig>>,
) -> Result<JoinHandle<()>> {
    // Find appropriate ecosystem for this URI
    let ecosystem = match state.ecosystem_registry.get_for_uri(&uri) {
        Some(e) => e,
        None => {
            tracing::debug!("No ecosystem handler for {:?}", uri);
            return Err(deps_core::error::DepsError::UnsupportedEcosystem(format!(
                "{uri:?}"
            )));
        }
    };

    // Extract old dependency names before parsing (for diff computation).
    // Empty set when the document was not tracked or never parsed.
    let old_dep_names: HashSet<String> =
        state.get_document(&uri).map_or_else(HashSet::new, |doc| {
            doc.parse_result()
                .map(|pr| {
                    pr.dependencies()
                        .into_iter()
                        .map(|d| d.name().to_string())
                        .collect()
                })
                .unwrap_or_default()
        });

    // Try to parse manifest (may fail for incomplete syntax mid-edit)
    let parse_result = ecosystem.parse_manifest(&content, &uri).await.ok();

    // Extract new dependency names for diff
    let new_dep_names: HashSet<String> = parse_result
        .as_ref()
        .map(|pr| {
            pr.dependencies()
                .into_iter()
                .map(|d| d.name().to_string())
                .collect()
        })
        .unwrap_or_default();

    // Compute dependency diff
    let diff = DependencyDiff::compute(&old_dep_names, &new_dep_names);
    tracing::debug!(
        added = diff.added.len(),
        removed = diff.removed.len(),
        "dependency diff"
    );

    let mut doc_state = if let Some(pr) = parse_result {
        DocumentState::new_from_parse_result(ecosystem.id(), content, pr)
    } else {
        tracing::debug!("Failed to parse manifest, storing document without parse result");
        DocumentState::new_without_parse_result(ecosystem.id(), content)
    };

    // Carry cached/resolved versions over so unchanged deps need no re-fetch
    if let Some(old_doc) = state.get_document(&uri) {
        preserve_cache(&mut doc_state, &old_doc);
    }

    // Prune stale cache entries for removed dependencies
    for removed_dep in &diff.removed {
        doc_state.cached_versions.remove(removed_dep);
        doc_state.resolved_versions.remove(removed_dep);
    }

    state.update_document(uri.clone(), doc_state);

    // Clone cache config before spawning background task (releases the lock)
    let cache_config = { config.read().await.cache.clone() };

    // Spawn background task to update diagnostics
    let uri_clone = uri.clone();
    let state_clone = Arc::clone(&state);
    let ecosystem_clone = Arc::clone(&ecosystem);
    let client_clone = client.clone();
    let deps_to_fetch = diff.added;

    let task = tokio::spawn(async move {
        // Small debounce delay so rapid keystrokes coalesce into one refresh
        tokio::time::sleep(tokio::time::Duration::from_millis(100)).await;

        // Load resolved versions from lock file first (instant, no network)
        let resolved_versions =
            load_resolved_versions(&uri_clone, &state_clone, ecosystem_clone.as_ref()).await;

        // Update document state with resolved versions only
        // Do NOT touch cached_versions - they contain latest registry versions
        if !resolved_versions.is_empty()
            && let Some(mut doc) = state_clone.documents.get_mut(&uri_clone)
        {
            doc.update_resolved_versions(resolved_versions.clone());
        }

        // Skip registry fetch if no new dependencies
        if deps_to_fetch.is_empty() {
            tracing::debug!("no new dependencies, skipping registry fetch");

            if let Some(mut doc) = state_clone.documents.get_mut(&uri_clone) {
                doc.set_loaded();
            }

            if let Err(e) = client_clone.inlay_hint_refresh().await {
                tracing::debug!("inlay_hint_refresh not supported: {:?}", e);
            }

            let diags =
                diagnostics::generate_diagnostics_internal(Arc::clone(&state_clone), &uri_clone)
                    .await;
            client_clone
                .publish_diagnostics(uri_clone.clone(), diags, None)
                .await;
            return;
        }

        tracing::info!(
            count = deps_to_fetch.len(),
            "fetching versions for new dependencies"
        );

        // Mark as loading and start progress
        if let Some(mut doc) = state_clone.documents.get_mut(&uri_clone) {
            doc.set_loading();
        }

        // Progress creation talks to the client; cap it at 2s so an
        // unresponsive client cannot stall version fetching.
        let (progress, progress_sender) = match tokio::time::timeout(
            std::time::Duration::from_secs(2),
            RegistryProgress::start(
                client_clone.clone(),
                uri_clone.as_str(),
                deps_to_fetch.len(),
            ),
        )
        .await
        {
            Ok(Ok((p, s))) => (Some(p), Some(s)),
            _ => (None, None),
        };

        // Fetch latest versions only for NEW dependencies
        let registry = ecosystem_clone.registry();
        let fetch_result = fetch_latest_versions_parallel(
            registry,
            deps_to_fetch,
            progress_sender,
            cache_config.fetch_timeout_secs,
            cache_config.max_concurrent_fetches,
        )
        .await;

        // NOTE(review): `success` is false when every NEW dependency yielded
        // no version (including `Ok(None)` lookups with zero hard failures),
        // which marks the whole document failed — confirm that is intended.
        let success = !fetch_result.versions.is_empty();

        // Merge new versions into existing cache
        if let Some(mut doc) = state_clone.documents.get_mut(&uri_clone) {
            for (name, version) in fetch_result.versions {
                doc.cached_versions.insert(name, version);
            }
            if success {
                doc.set_loaded();
            } else {
                doc.set_failed();
            }
        }

        if let Some(progress) = progress {
            progress.end(success).await;
        }

        // Notify user about failed packages
        if fetch_result.failed_count > 0 {
            let message = if let Some(err) = &fetch_result.first_error {
                format!("deps-lsp: {err}")
            } else {
                format!(
                    "deps-lsp: {} package(s) failed to fetch (timeout or network error)",
                    fetch_result.failed_count
                )
            };
            client_clone
                .show_message(MessageType::WARNING, message)
                .await;
        }

        // Refresh hints first so new versions show up before diagnostics land
        if let Err(e) = client_clone.inlay_hint_refresh().await {
            tracing::debug!("inlay_hint_refresh not supported: {:?}", e);
        }

        let diags =
            diagnostics::generate_diagnostics_internal(Arc::clone(&state_clone), &uri_clone).await;

        client_clone
            .publish_diagnostics(uri_clone.clone(), diags, None)
            .await;
    });

    Ok(task)
}
546
547/// Loads resolved versions from lock file for a given manifest URI.
548///
549/// Uses the ecosystem's lockfile provider to parse the lock file.
550/// Returns a HashMap mapping package names to their resolved versions.
551/// Returns an empty HashMap if no lock file is found or parsing fails.
552async fn load_resolved_versions(
553    uri: &Uri,
554    state: &ServerState,
555    ecosystem: &dyn Ecosystem,
556) -> HashMap<String, String> {
557    let lock_provider = match ecosystem.lockfile_provider() {
558        Some(p) => p,
559        None => {
560            tracing::debug!("No lock file provider for ecosystem {}", ecosystem.id());
561            return HashMap::new();
562        }
563    };
564
565    let lockfile_path = match lock_provider.locate_lockfile(uri) {
566        Some(path) => path,
567        None => {
568            tracing::debug!("No lock file found for {:?}", uri);
569            return HashMap::new();
570        }
571    };
572
573    match state
574        .lockfile_cache
575        .get_or_parse(lock_provider.as_ref(), &lockfile_path)
576        .await
577    {
578        Ok(resolved) => {
579            tracing::info!(
580                "Loaded {} resolved versions from {}",
581                resolved.len(),
582                lockfile_path.display()
583            );
584            resolved
585                .iter()
586                .map(|(name, pkg)| (name.clone(), pkg.version.clone()))
587                .collect()
588        }
589        Err(e) => {
590            tracing::warn!("Failed to parse lock file: {}", e);
591            HashMap::new()
592        }
593    }
594}
595
596/// Ensures a document is loaded in state.
597///
598/// If the document is not already in state, loads it from disk,
599/// parses it, and spawns a background task to fetch version information.
600///
601/// This function is idempotent - calling it multiple times with the
602/// same URI is safe and will only load once.
603///
604/// # Arguments
605///
606/// * `uri` - Document URI
607/// * `state` - Server state
608/// * `client` - LSP client for notifications
609/// * `config` - Server configuration
610///
611/// # Returns
612///
613/// * `true` - Document is now loaded (either already existed or was just loaded)
614/// * `false` - Document could not be loaded (unsupported file type, read error, etc.)
615///
616/// # Behavior
617///
618/// - If document exists in state → Return true immediately (no-op)
619/// - If document doesn't exist → Load from disk, parse, update state, spawn bg task
620/// - If load fails → Log warning and return false (graceful degradation)
621///
622/// # Examples
623///
624/// ```no_run
625/// use deps_lsp::document::ensure_document_loaded;
626/// use deps_lsp::document::ServerState;
627/// use tower_lsp_server::ls_types::Uri;
628/// use std::sync::Arc;
629///
630/// # async fn example(
631/// #     uri: &Uri,
632/// #     state: Arc<ServerState>,
633/// #     client: tower_lsp_server::Client,
634/// #     config: Arc<tokio::sync::RwLock<deps_lsp::config::DepsConfig>>,
635/// # ) {
636/// let loaded = ensure_document_loaded(uri, state, client, config).await;
637/// if loaded {
638///     println!("Document is available for processing");
639/// }
640/// # }
641/// ```
642pub async fn ensure_document_loaded(
643    uri: &Uri,
644    state: Arc<ServerState>,
645    client: Client,
646    config: Arc<RwLock<DepsConfig>>,
647) -> bool {
648    // Fast path: document already loaded
649    if state.get_document(uri).is_some() {
650        tracing::debug!("Document already loaded: {:?}", uri);
651        return true;
652    }
653
654    // Clone cold start config before async operations to release lock
655    let cold_start_config = { config.read().await.cold_start.clone() };
656
657    // Check if cold start is enabled
658    if !cold_start_config.enabled {
659        tracing::debug!("Cold start disabled via configuration");
660        return false;
661    }
662
663    // Rate limiting check
664    if !state.cold_start_limiter.allow_cold_start(uri) {
665        tracing::warn!("Cold start rate limited: {:?}", uri);
666        return false;
667    }
668
669    // Check if we support this file type
670    if state.ecosystem_registry.get_for_uri(uri).is_none() {
671        tracing::debug!("Unsupported file type: {:?}", uri);
672        return false;
673    }
674
675    // Load from disk
676    tracing::info!("Loading document from disk (cold start): {:?}", uri);
677    let content = match load_document_from_disk(uri).await {
678        Ok(c) => c,
679        Err(e) => {
680            tracing::warn!("Failed to load document {:?}: {}", uri, e);
681            client
682                .log_message(MessageType::WARNING, format!("Could not load file: {e}"))
683                .await;
684            return false;
685        }
686    };
687
688    // Reuse existing handle_document_open logic
689    match handle_document_open(
690        uri.clone(),
691        content,
692        Arc::clone(&state),
693        client.clone(),
694        Arc::clone(&config),
695    )
696    .await
697    {
698        Ok(task) => {
699            state.spawn_background_task(uri.clone(), task).await;
700            tracing::info!("Document loaded successfully from disk: {:?}", uri);
701            true
702        }
703        Err(e) => {
704            tracing::warn!("Failed to process loaded document {:?}: {}", uri, e);
705            false
706        }
707    }
708}
709
710#[cfg(test)]
711mod tests {
712    use super::*;
713
714    // Generic tests (no feature flag required)
715
    #[test]
    fn test_ecosystem_registry_unknown_file() {
        // A `.txt` file matches no registered ecosystem, so lookup yields None.
        let state = ServerState::new();
        let unknown_uri =
            tower_lsp_server::ls_types::Uri::from_file_path("/test/unknown.txt").unwrap();
        assert!(state.ecosystem_registry.get_for_uri(&unknown_uri).is_none());
    }
723
    #[tokio::test]
    async fn test_ensure_document_loaded_unsupported_file_check() {
        // Returns false for unknown file types (e.g., README.md)
        let state = Arc::new(ServerState::new());
        let uri = Uri::from_file_path("/test/README.md").unwrap();

        // Verify ecosystem registry correctly identifies unsupported files
        assert!(
            state.ecosystem_registry.get_for_uri(&uri).is_none(),
            "README.md should not have an ecosystem handler"
        );

        // This would cause ensure_document_loaded to return false.
        // We test the underlying condition without needing Client
        // (constructing a real LSP Client requires a transport).
    }
739
    #[tokio::test]
    async fn test_ensure_document_loaded_file_not_found_check() {
        // Test that load_document_from_disk fails gracefully for missing files
        use super::load_document_from_disk;

        // Path intentionally does not exist on any test machine.
        let uri = Uri::from_file_path("/nonexistent/Cargo.toml").unwrap();
        let result = load_document_from_disk(&uri).await;

        assert!(result.is_err(), "Should fail for missing files");

        // This error would cause ensure_document_loaded to return false
    }
752
    #[tokio::test]
    async fn test_fetch_latest_versions_parallel_with_timeout() {
        use deps_core::{Metadata, Registry, Version};
        use std::any::Any;
        use std::time::Duration;

        // Mock registry that always times out: every lookup sleeps well past
        // the 1s timeout configured below.
        struct TimeoutRegistry;

        impl Registry for TimeoutRegistry {
            // Not exercised by fetch_latest_versions_parallel; required by the trait.
            fn get_versions<'a>(
                &'a self,
                _name: &'a str,
            ) -> deps_core::ecosystem::BoxFuture<'a, deps_core::Result<Vec<Box<dyn Version>>>>
            {
                Box::pin(async move {
                    // Sleep longer than timeout (10s default)
                    tokio::time::sleep(Duration::from_secs(10)).await;
                    Ok(vec![])
                })
            }

            // The code path under test calls this; it never resolves in time.
            fn get_latest_matching<'a>(
                &'a self,
                _name: &'a str,
                _req: &'a str,
            ) -> deps_core::ecosystem::BoxFuture<'a, deps_core::Result<Option<Box<dyn Version>>>>
            {
                Box::pin(async move {
                    // Sleep longer than timeout
                    tokio::time::sleep(Duration::from_secs(10)).await;
                    Ok(None)
                })
            }

            fn search<'a>(
                &'a self,
                _query: &'a str,
                _limit: usize,
            ) -> deps_core::ecosystem::BoxFuture<'a, deps_core::Result<Vec<Box<dyn Metadata>>>>
            {
                Box::pin(async move { Ok(vec![]) })
            }

            fn package_url(&self, name: &str) -> String {
                format!("https://example.com/{}", name)
            }

            fn as_any(&self) -> &dyn Any {
                self
            }
        }

        let registry: Arc<dyn Registry> = Arc::new(TimeoutRegistry);
        let packages = vec!["slow-package".to_string()];

        // Use 1 second timeout for test speed
        let result = fetch_latest_versions_parallel(registry, packages, None, 1, 10).await;

        // Should return empty (timeout, not success)
        assert!(result.versions.is_empty(), "Slow package should timeout");
        assert_eq!(result.failed_count, 1, "Should track 1 failed package");
    }
816
    #[tokio::test]
    async fn test_fetch_latest_versions_parallel_fast_packages_not_blocked() {
        use deps_core::{Metadata, Registry, Version};
        use std::any::Any;
        use std::time::Duration;

        // Mock registry with one slow, one fast package: only "slow-package"
        // sleeps past the timeout, everything else resolves immediately.
        struct MixedRegistry;

        impl Registry for MixedRegistry {
            // Not exercised by fetch_latest_versions_parallel; required by the trait.
            fn get_versions<'a>(
                &'a self,
                name: &'a str,
            ) -> deps_core::ecosystem::BoxFuture<'a, deps_core::Result<Vec<Box<dyn Version>>>>
            {
                Box::pin(async move {
                    if name == "slow-package" {
                        // Sleep longer than timeout
                        tokio::time::sleep(Duration::from_secs(10)).await;
                    }
                    // Fast package or unknown: return immediately
                    Ok(vec![])
                })
            }

            // The code path under test calls this.
            fn get_latest_matching<'a>(
                &'a self,
                name: &'a str,
                _req: &'a str,
            ) -> deps_core::ecosystem::BoxFuture<'a, deps_core::Result<Option<Box<dyn Version>>>>
            {
                Box::pin(async move {
                    if name == "slow-package" {
                        // Sleep longer than timeout
                        tokio::time::sleep(Duration::from_secs(10)).await;
                    }
                    // Fast package or unknown: return immediately (no versions)
                    Ok(None)
                })
            }

            fn search<'a>(
                &'a self,
                _query: &'a str,
                _limit: usize,
            ) -> deps_core::ecosystem::BoxFuture<'a, deps_core::Result<Vec<Box<dyn Metadata>>>>
            {
                Box::pin(async move { Ok(vec![]) })
            }

            fn package_url(&self, name: &str) -> String {
                format!("https://example.com/{}", name)
            }

            fn as_any(&self) -> &dyn Any {
                self
            }
        }

        let registry: Arc<dyn Registry> = Arc::new(MixedRegistry);
        let packages = vec!["slow-package".to_string(), "fast-package".to_string()];

        let start = std::time::Instant::now();
        let result = fetch_latest_versions_parallel(registry, packages, None, 1, 10).await;
        let elapsed = start.elapsed();

        // Should complete in ~1s (timeout), not 10s (slow package duration)
        assert!(
            elapsed < Duration::from_secs(3),
            "Should not wait for slow package: {:?}",
            elapsed
        );

        // Fast package processed (no versions), slow package timed out
        assert!(
            result.versions.is_empty(),
            "No versions returned (test registry returns empty)"
        );
        assert_eq!(
            result.failed_count, 1,
            "Slow package should be marked as failed"
        );
    }
900
    #[tokio::test]
    async fn test_fetch_latest_versions_parallel_concurrency_limit() {
        use deps_core::{Metadata, Registry, Version};
        use std::any::Any;
        use std::sync::atomic::{AtomicUsize, Ordering};
        use std::time::Duration;

        // Mock registry that tracks concurrent requests:
        // `current` counts in-flight calls, `max_seen` records the
        // high-water mark via fetch_max. Both are shared with the test body
        // through Arc so they can be inspected after the fetch completes.
        struct ConcurrencyTrackingRegistry {
            current: Arc<AtomicUsize>,
            max_seen: Arc<AtomicUsize>,
        }

        impl Registry for ConcurrencyTrackingRegistry {
            fn get_versions<'a>(
                &'a self,
                _name: &'a str,
            ) -> deps_core::ecosystem::BoxFuture<'a, deps_core::Result<Vec<Box<dyn Version>>>>
            {
                Box::pin(async move {
                    // Increment concurrent counter
                    // (fetch_add returns the PREVIOUS value, hence the +1)
                    let current = self.current.fetch_add(1, Ordering::SeqCst) + 1;

                    // Track max concurrent
                    self.max_seen.fetch_max(current, Ordering::SeqCst);

                    // Simulate work — long enough that overlapping requests
                    // genuinely overlap and the counter reflects concurrency
                    tokio::time::sleep(Duration::from_millis(50)).await;

                    // Decrement counter
                    self.current.fetch_sub(1, Ordering::SeqCst);

                    Ok(vec![])
                })
            }

            fn get_latest_matching<'a>(
                &'a self,
                _name: &'a str,
                _req: &'a str,
            ) -> deps_core::ecosystem::BoxFuture<'a, deps_core::Result<Option<Box<dyn Version>>>>
            {
                Box::pin(async move {
                    // Increment concurrent counter
                    let current = self.current.fetch_add(1, Ordering::SeqCst) + 1;

                    // Track max concurrent
                    self.max_seen.fetch_max(current, Ordering::SeqCst);

                    // Simulate work
                    tokio::time::sleep(Duration::from_millis(50)).await;

                    // Decrement counter
                    self.current.fetch_sub(1, Ordering::SeqCst);

                    Ok(None)
                })
            }

            fn search<'a>(
                &'a self,
                _query: &'a str,
                _limit: usize,
            ) -> deps_core::ecosystem::BoxFuture<'a, deps_core::Result<Vec<Box<dyn Metadata>>>>
            {
                Box::pin(async move { Ok(vec![]) })
            }

            fn package_url(&self, name: &str) -> String {
                format!("https://example.com/{}", name)
            }

            fn as_any(&self) -> &dyn Any {
                self
            }
        }

        // Keep handles to the counters so we can read max_seen afterwards.
        let current = Arc::new(AtomicUsize::new(0));
        let max_seen = Arc::new(AtomicUsize::new(0));

        let registry: Arc<dyn Registry> = Arc::new(ConcurrencyTrackingRegistry {
            current: Arc::clone(&current),
            max_seen: Arc::clone(&max_seen),
        });

        // Create 50 packages, limit concurrency to 20
        let packages: Vec<String> = (0..50).map(|i| format!("package-{}", i)).collect();

        // Args: (registry, packages, progress, timeout_secs=5, concurrency=20).
        fetch_latest_versions_parallel(registry, packages, None, 5, 20).await;

        // Max concurrent should not exceed limit (allow small margin for timing,
        // since increment and max-tracking are two separate atomic operations)
        let max = max_seen.load(Ordering::SeqCst);
        assert!(
            max <= 22,
            "Concurrency limit violated: {} concurrent requests (limit: 20)",
            max
        );
    }
999
1000    #[tokio::test]
1001    async fn test_fetch_partial_success_with_mixed_outcomes() {
1002        use deps_core::{Metadata, Registry, Version};
1003        use std::any::Any;
1004        use std::time::Duration;
1005
1006        // Mock version for successful fetches
1007        #[derive(Debug)]
1008        struct MockVersion {
1009            version: String,
1010        }
1011
1012        impl Version for MockVersion {
1013            fn version_string(&self) -> &str {
1014                &self.version
1015            }
1016
1017            fn is_prerelease(&self) -> bool {
1018                false
1019            }
1020
1021            fn is_yanked(&self) -> bool {
1022                false
1023            }
1024
1025            fn as_any(&self) -> &dyn Any {
1026                self
1027            }
1028        }
1029
1030        // Mock registry with mixed outcomes:
1031        // - "package-fast" returns quickly with version
1032        // - "package-slow" times out
1033        // - "package-error" returns error
1034        struct MixedOutcomeRegistry;
1035
1036        impl Registry for MixedOutcomeRegistry {
1037            fn get_versions<'a>(
1038                &'a self,
1039                name: &'a str,
1040            ) -> deps_core::ecosystem::BoxFuture<'a, deps_core::Result<Vec<Box<dyn Version>>>>
1041            {
1042                Box::pin(async move {
1043                    match name {
1044                        "package-fast" => {
1045                            // Return immediately with a stable version
1046                            Ok(vec![Box::new(MockVersion {
1047                                version: "1.0.0".to_string(),
1048                            }) as Box<dyn Version>])
1049                        }
1050                        "package-slow" => {
1051                            // Sleep longer than timeout (test uses 1s timeout)
1052                            tokio::time::sleep(Duration::from_secs(10)).await;
1053                            Ok(vec![])
1054                        }
1055                        "package-error" => {
1056                            // Return cache error (simpler for testing)
1057                            Err(deps_core::error::DepsError::CacheError(
1058                                "Mock registry error".to_string(),
1059                            ))
1060                        }
1061                        _ => Ok(vec![]),
1062                    }
1063                })
1064            }
1065
1066            fn get_latest_matching<'a>(
1067                &'a self,
1068                name: &'a str,
1069                _req: &'a str,
1070            ) -> deps_core::ecosystem::BoxFuture<'a, deps_core::Result<Option<Box<dyn Version>>>>
1071            {
1072                Box::pin(async move {
1073                    match name {
1074                        "package-fast" => Ok(Some(Box::new(MockVersion {
1075                            version: "1.0.0".to_string(),
1076                        }) as Box<dyn Version>)),
1077                        "package-slow" => {
1078                            tokio::time::sleep(Duration::from_secs(10)).await;
1079                            Ok(None)
1080                        }
1081                        "package-error" => Err(deps_core::error::DepsError::CacheError(
1082                            "Mock registry error".to_string(),
1083                        )),
1084                        _ => Ok(None),
1085                    }
1086                })
1087            }
1088
1089            fn search<'a>(
1090                &'a self,
1091                _query: &'a str,
1092                _limit: usize,
1093            ) -> deps_core::ecosystem::BoxFuture<'a, deps_core::Result<Vec<Box<dyn Metadata>>>>
1094            {
1095                Box::pin(async move { Ok(vec![]) })
1096            }
1097
1098            fn package_url(&self, name: &str) -> String {
1099                format!("https://example.com/{}", name)
1100            }
1101
1102            fn as_any(&self) -> &dyn Any {
1103                self
1104            }
1105        }
1106
1107        let registry: Arc<dyn Registry> = Arc::new(MixedOutcomeRegistry);
1108        let packages = vec![
1109            "package-fast".to_string(),
1110            "package-slow".to_string(),
1111            "package-error".to_string(),
1112        ];
1113
1114        // Use 1 second timeout for test speed
1115        let result = fetch_latest_versions_parallel(registry, packages, None, 1, 10).await;
1116
1117        // Only the fast package should be in results
1118        assert_eq!(
1119            result.versions.len(),
1120            1,
1121            "Should have exactly 1 successful package"
1122        );
1123        assert_eq!(
1124            result.versions.get("package-fast"),
1125            Some(&"1.0.0".to_string()),
1126            "Fast package should have correct version"
1127        );
1128        assert!(
1129            !result.versions.contains_key("package-slow"),
1130            "Slow package should not be in results (timeout)"
1131        );
1132        assert!(
1133            !result.versions.contains_key("package-error"),
1134            "Error package should not be in results"
1135        );
1136    }
1137
1138    #[tokio::test]
1139    async fn test_fetch_registry_error_handled() {
1140        use deps_core::{Metadata, Registry, Version};
1141        use std::any::Any;
1142
1143        // Mock registry that returns errors for all packages
1144        struct ErrorRegistry;
1145
1146        impl Registry for ErrorRegistry {
1147            fn get_versions<'a>(
1148                &'a self,
1149                name: &'a str,
1150            ) -> deps_core::ecosystem::BoxFuture<'a, deps_core::Result<Vec<Box<dyn Version>>>>
1151            {
1152                Box::pin(async move {
1153                    Err(deps_core::error::DepsError::CacheError(format!(
1154                        "Failed to fetch package: {}",
1155                        name
1156                    )))
1157                })
1158            }
1159
1160            fn get_latest_matching<'a>(
1161                &'a self,
1162                name: &'a str,
1163                _req: &'a str,
1164            ) -> deps_core::ecosystem::BoxFuture<'a, deps_core::Result<Option<Box<dyn Version>>>>
1165            {
1166                Box::pin(async move {
1167                    Err(deps_core::error::DepsError::CacheError(format!(
1168                        "Failed to fetch package: {}",
1169                        name
1170                    )))
1171                })
1172            }
1173
1174            fn search<'a>(
1175                &'a self,
1176                _query: &'a str,
1177                _limit: usize,
1178            ) -> deps_core::ecosystem::BoxFuture<'a, deps_core::Result<Vec<Box<dyn Metadata>>>>
1179            {
1180                Box::pin(async move { Ok(vec![]) })
1181            }
1182
1183            fn package_url(&self, name: &str) -> String {
1184                format!("https://example.com/{}", name)
1185            }
1186
1187            fn as_any(&self) -> &dyn Any {
1188                self
1189            }
1190        }
1191
1192        let registry: Arc<dyn Registry> = Arc::new(ErrorRegistry);
1193        let packages = vec![
1194            "package-1".to_string(),
1195            "package-2".to_string(),
1196            "package-3".to_string(),
1197        ];
1198
1199        // Should not panic, just return empty result
1200        let result = fetch_latest_versions_parallel(registry, packages, None, 5, 10).await;
1201
1202        // All packages failed, result should be empty
1203        assert!(
1204            result.versions.is_empty(),
1205            "All packages with errors should be omitted from results"
1206        );
1207        assert_eq!(
1208            result.failed_count, 3,
1209            "All 3 packages should be marked as failed"
1210        );
1211    }
1212
1213    // Cargo-specific tests
1214    #[cfg(feature = "cargo")]
1215    mod cargo_tests {
1216        use super::*;
1217
1218        #[test]
1219        fn test_ecosystem_registry_lookup() {
1220            let state = ServerState::new();
1221            let cargo_uri =
1222                tower_lsp_server::ls_types::Uri::from_file_path("/test/Cargo.toml").unwrap();
1223            assert!(state.ecosystem_registry.get_for_uri(&cargo_uri).is_some());
1224        }
1225
1226        #[tokio::test]
1227        async fn test_document_parsing() {
1228            let state = Arc::new(ServerState::new());
1229            let uri = tower_lsp_server::ls_types::Uri::from_file_path("/test/Cargo.toml").unwrap();
1230            let content = r#"[dependencies]
1231serde = "1.0"
1232"#;
1233
1234            let ecosystem = state
1235                .ecosystem_registry
1236                .get_for_uri(&uri)
1237                .expect("Cargo ecosystem not found");
1238
1239            let parse_result = ecosystem.parse_manifest(content, &uri).await;
1240            assert!(parse_result.is_ok());
1241
1242            let doc_state = DocumentState::new_from_parse_result(
1243                "cargo",
1244                content.to_string(),
1245                parse_result.unwrap(),
1246            );
1247            state.update_document(uri.clone(), doc_state);
1248
1249            assert_eq!(state.document_count(), 1);
1250            let doc = state.get_document(&uri).unwrap();
1251            assert_eq!(doc.ecosystem_id, "cargo");
1252        }
1253
1254        #[tokio::test]
1255        async fn test_document_stored_even_when_parsing_fails() {
1256            let state = Arc::new(ServerState::new());
1257            let uri = tower_lsp_server::ls_types::Uri::from_file_path("/test/Cargo.toml").unwrap();
1258            // Invalid TOML that will fail parsing
1259            let content = r#"[dependencies
1260serde = "1.0"
1261"#;
1262
1263            let ecosystem = state
1264                .ecosystem_registry
1265                .get_for_uri(&uri)
1266                .expect("Cargo ecosystem not found");
1267
1268            // Try to parse (will fail)
1269            let parse_result = ecosystem.parse_manifest(content, &uri).await.ok();
1270            assert!(
1271                parse_result.is_none(),
1272                "Parsing should fail for invalid TOML"
1273            );
1274
1275            // Create document state without parse result
1276            let doc_state = if let Some(pr) = parse_result {
1277                DocumentState::new_from_parse_result("cargo", content.to_string(), pr)
1278            } else {
1279                DocumentState::new_without_parse_result("cargo", content.to_string())
1280            };
1281
1282            state.update_document(uri.clone(), doc_state);
1283
1284            // Document should be stored despite parse failure
1285            let doc = state.get_document(&uri);
1286            assert!(
1287                doc.is_some(),
1288                "Document should be stored even when parsing fails"
1289            );
1290
1291            let doc = doc.unwrap();
1292            assert_eq!(doc.ecosystem_id, "cargo");
1293            assert_eq!(doc.content, content);
1294            assert!(
1295                doc.parse_result().is_none(),
1296                "Parse result should be None for failed parse"
1297            );
1298        }
1299
1300        #[tokio::test]
1301        async fn test_ensure_document_loaded_fast_path() {
1302            // Fast path: document already loaded, should return true without loading
1303            let state = Arc::new(ServerState::new());
1304            let uri = Uri::from_file_path("/test/Cargo.toml").unwrap();
1305            let content = r#"[dependencies]
1306serde = "1.0""#;
1307
1308            // Pre-populate state with document
1309            let ecosystem = state
1310                .ecosystem_registry
1311                .get_for_uri(&uri)
1312                .expect("Cargo ecosystem");
1313            let parse_result = ecosystem.parse_manifest(content, &uri).await.unwrap();
1314            let doc_state =
1315                DocumentState::new_from_parse_result("cargo", content.to_string(), parse_result);
1316            state.update_document(uri.clone(), doc_state);
1317
1318            // Fast path check: document exists
1319            assert!(
1320                state.get_document(&uri).is_some(),
1321                "Document should exist in state"
1322            );
1323            assert_eq!(state.document_count(), 1, "Document count should be 1");
1324
1325            // The fast path in ensure_document_loaded would return true here without
1326            // requiring a Client. We test the condition directly since creating a test
1327            // Client requires complex tower-lsp-server internals (ServerState, ClientSocket).
1328        }
1329
1330        #[tokio::test]
1331        async fn test_ensure_document_loaded_successful_disk_load() {
1332            // Test successful load from filesystem with temp file
1333            use super::super::load_document_from_disk;
1334            use std::fs;
1335            use tempfile::TempDir;
1336
1337            // Create a temporary directory with a Cargo.toml file
1338            let temp_dir = TempDir::new().unwrap();
1339            let cargo_toml_path = temp_dir.path().join("Cargo.toml");
1340            let content = r#"[package]
1341name = "test"
1342version = "0.1.0"
1343
1344[dependencies]
1345serde = "1.0"
1346"#;
1347            fs::write(&cargo_toml_path, content).unwrap();
1348
1349            let uri = Uri::from_file_path(&cargo_toml_path).unwrap();
1350
1351            // Test that load_document_from_disk succeeds
1352            let loaded_content = load_document_from_disk(&uri).await.unwrap();
1353            assert_eq!(loaded_content, content);
1354
1355            // Test that parsing succeeds
1356            let state = Arc::new(ServerState::new());
1357            let ecosystem = state
1358                .ecosystem_registry
1359                .get_for_uri(&uri)
1360                .expect("Cargo ecosystem");
1361            let parse_result = ecosystem.parse_manifest(&loaded_content, &uri).await;
1362            assert!(parse_result.is_ok(), "Should parse successfully");
1363
1364            // These successful operations are the building blocks of ensure_document_loaded
1365        }
1366
1367        #[tokio::test]
1368        async fn test_ensure_document_loaded_idempotent_check() {
1369            // Test that repeated loads are idempotent at the state level
1370            let state = Arc::new(ServerState::new());
1371            let uri = Uri::from_file_path("/test/Cargo.toml").unwrap();
1372            let content = r#"[dependencies]
1373serde = "1.0""#;
1374
1375            let ecosystem = state
1376                .ecosystem_registry
1377                .get_for_uri(&uri)
1378                .expect("Cargo ecosystem");
1379
1380            // Parse twice to simulate idempotent loads
1381            let parse_result1 = ecosystem.parse_manifest(content, &uri).await.unwrap();
1382            let parse_result2 = ecosystem.parse_manifest(content, &uri).await.unwrap();
1383
1384            // First update
1385            let doc_state1 =
1386                DocumentState::new_from_parse_result("cargo", content.to_string(), parse_result1);
1387            state.update_document(uri.clone(), doc_state1);
1388            assert_eq!(state.document_count(), 1);
1389
1390            // Second update (idempotent)
1391            let doc_state2 =
1392                DocumentState::new_from_parse_result("cargo", content.to_string(), parse_result2);
1393            state.update_document(uri.clone(), doc_state2);
1394            assert_eq!(
1395                state.document_count(),
1396                1,
1397                "Should still have only 1 document"
1398            );
1399        }
1400    }
1401
1402    // npm-specific tests
1403    #[cfg(feature = "npm")]
1404    mod npm_tests {
1405        use super::*;
1406
1407        #[test]
1408        fn test_ecosystem_registry_lookup() {
1409            let state = ServerState::new();
1410            let npm_uri =
1411                tower_lsp_server::ls_types::Uri::from_file_path("/test/package.json").unwrap();
1412            assert!(state.ecosystem_registry.get_for_uri(&npm_uri).is_some());
1413        }
1414
1415        #[tokio::test]
1416        async fn test_document_parsing() {
1417            let state = Arc::new(ServerState::new());
1418            let uri =
1419                tower_lsp_server::ls_types::Uri::from_file_path("/test/package.json").unwrap();
1420            let content = r#"{"dependencies": {"express": "^4.18.0"}}"#;
1421
1422            let ecosystem = state
1423                .ecosystem_registry
1424                .get_for_uri(&uri)
1425                .expect("npm ecosystem not found");
1426
1427            let parse_result = ecosystem.parse_manifest(content, &uri).await;
1428            assert!(parse_result.is_ok());
1429
1430            let doc_state = DocumentState::new_from_parse_result(
1431                "npm",
1432                content.to_string(),
1433                parse_result.unwrap(),
1434            );
1435            state.update_document(uri.clone(), doc_state);
1436
1437            let doc = state.get_document(&uri).unwrap();
1438            assert_eq!(doc.ecosystem_id, "npm");
1439        }
1440    }
1441
1442    // PyPI-specific tests
1443    #[cfg(feature = "pypi")]
1444    mod pypi_tests {
1445        use super::*;
1446
1447        #[test]
1448        fn test_ecosystem_registry_lookup() {
1449            let state = ServerState::new();
1450            let pypi_uri =
1451                tower_lsp_server::ls_types::Uri::from_file_path("/test/pyproject.toml").unwrap();
1452            assert!(state.ecosystem_registry.get_for_uri(&pypi_uri).is_some());
1453        }
1454
1455        #[tokio::test]
1456        async fn test_document_parsing() {
1457            let state = Arc::new(ServerState::new());
1458            let uri =
1459                tower_lsp_server::ls_types::Uri::from_file_path("/test/pyproject.toml").unwrap();
1460            let content = r#"[project]
1461dependencies = ["requests>=2.0.0"]
1462"#;
1463
1464            let ecosystem = state
1465                .ecosystem_registry
1466                .get_for_uri(&uri)
1467                .expect("pypi ecosystem not found");
1468
1469            let parse_result = ecosystem.parse_manifest(content, &uri).await;
1470            assert!(parse_result.is_ok());
1471
1472            let doc_state = DocumentState::new_from_parse_result(
1473                "pypi",
1474                content.to_string(),
1475                parse_result.unwrap(),
1476            );
1477            state.update_document(uri.clone(), doc_state);
1478
1479            let doc = state.get_document(&uri).unwrap();
1480            assert_eq!(doc.ecosystem_id, "pypi");
1481        }
1482    }
1483
1484    // Go-specific tests
1485    #[cfg(feature = "go")]
1486    mod go_tests {
1487        use super::*;
1488
1489        #[test]
1490        fn test_ecosystem_registry_lookup() {
1491            let state = ServerState::new();
1492            let go_uri = tower_lsp_server::ls_types::Uri::from_file_path("/test/go.mod").unwrap();
1493            assert!(state.ecosystem_registry.get_for_uri(&go_uri).is_some());
1494        }
1495
1496        #[tokio::test]
1497        async fn test_document_parsing() {
1498            let state = Arc::new(ServerState::new());
1499            let uri = tower_lsp_server::ls_types::Uri::from_file_path("/test/go.mod").unwrap();
1500            let content = r"module example.com/mymodule
1501
1502go 1.21
1503
1504require github.com/gorilla/mux v1.8.0
1505";
1506
1507            let ecosystem = state
1508                .ecosystem_registry
1509                .get_for_uri(&uri)
1510                .expect("go ecosystem not found");
1511
1512            let parse_result = ecosystem.parse_manifest(content, &uri).await;
1513            assert!(parse_result.is_ok());
1514
1515            let doc_state = DocumentState::new_from_parse_result(
1516                "go",
1517                content.to_string(),
1518                parse_result.unwrap(),
1519            );
1520            state.update_document(uri.clone(), doc_state);
1521
1522            let doc = state.get_document(&uri).unwrap();
1523            assert_eq!(doc.ecosystem_id, "go");
1524        }
1525    }
1526
1527    // Phase 1: Cache Preservation Tests
1528    #[cfg(feature = "cargo")]
1529    mod incremental_fetch_tests {
1530        use super::*;
1531
1532        #[tokio::test]
1533        async fn test_preserve_cached_versions_on_change() {
1534            let state = Arc::new(ServerState::new());
1535            let uri = Uri::from_file_path("/test/Cargo.toml").unwrap();
1536
1537            // Initial document with 2 dependencies
1538            let content1 = r#"[dependencies]
1539serde = "1.0"
1540tokio = "1.0"
1541"#;
1542
1543            let ecosystem = state.ecosystem_registry.get("cargo").unwrap();
1544            let parse_result1 = ecosystem.parse_manifest(content1, &uri).await.unwrap();
1545            let doc_state1 =
1546                DocumentState::new_from_parse_result("cargo", content1.to_string(), parse_result1);
1547            state.update_document(uri.clone(), doc_state1);
1548
1549            // Manually populate cache (simulating background fetch)
1550            {
1551                let mut doc = state.documents.get_mut(&uri).unwrap();
1552                doc.cached_versions
1553                    .insert("serde".to_string(), "1.0.210".to_string());
1554                doc.cached_versions
1555                    .insert("tokio".to_string(), "1.40.0".to_string());
1556                doc.resolved_versions
1557                    .insert("serde".to_string(), "1.0.195".to_string());
1558                doc.resolved_versions
1559                    .insert("tokio".to_string(), "1.35.0".to_string());
1560            }
1561
1562            // Verify cache populated
1563            {
1564                let doc = state.get_document(&uri).unwrap();
1565                assert_eq!(doc.cached_versions.len(), 2);
1566                assert_eq!(doc.resolved_versions.len(), 2);
1567            }
1568
1569            // Change document (modify serde version)
1570            let content2 = r#"[dependencies]
1571serde = "1.0.210"
1572tokio = "1.0"
1573"#;
1574
1575            let parse_result2 = ecosystem.parse_manifest(content2, &uri).await.unwrap();
1576            let mut doc_state2 =
1577                DocumentState::new_from_parse_result("cargo", content2.to_string(), parse_result2);
1578
1579            if let Some(old_doc) = state.get_document(&uri) {
1580                preserve_cache(&mut doc_state2, &old_doc);
1581            }
1582
1583            state.update_document(uri.clone(), doc_state2);
1584
1585            // Verify cache preserved after update
1586            {
1587                let doc = state.get_document(&uri).unwrap();
1588                assert_eq!(
1589                    doc.cached_versions.len(),
1590                    2,
1591                    "Cached versions should be preserved"
1592                );
1593                assert_eq!(
1594                    doc.cached_versions.get("serde"),
1595                    Some(&"1.0.210".to_string()),
1596                    "serde cache preserved"
1597                );
1598                assert_eq!(
1599                    doc.cached_versions.get("tokio"),
1600                    Some(&"1.40.0".to_string()),
1601                    "tokio cache preserved"
1602                );
1603                assert_eq!(
1604                    doc.resolved_versions.len(),
1605                    2,
1606                    "Resolved versions should be preserved"
1607                );
1608            }
1609        }
1610
1611        #[tokio::test]
1612        async fn test_first_open_has_empty_cache() {
1613            let state = Arc::new(ServerState::new());
1614            let uri = Uri::from_file_path("/test/Cargo.toml").unwrap();
1615
1616            let content = r#"[dependencies]
1617serde = "1.0"
1618"#;
1619
1620            let ecosystem = state.ecosystem_registry.get("cargo").unwrap();
1621            let parse_result = ecosystem.parse_manifest(content, &uri).await.unwrap();
1622            let doc_state =
1623                DocumentState::new_from_parse_result("cargo", content.to_string(), parse_result);
1624            state.update_document(uri.clone(), doc_state);
1625
1626            // First open: cache should be empty (no old state to preserve)
1627            let doc = state.get_document(&uri).unwrap();
1628            assert_eq!(
1629                doc.cached_versions.len(),
1630                0,
1631                "First open should have empty cache"
1632            );
1633        }
1634
1635        #[tokio::test]
1636        async fn test_preserve_cache_on_parse_failure() {
1637            let state = Arc::new(ServerState::new());
1638            let uri = Uri::from_file_path("/test/Cargo.toml").unwrap();
1639
1640            // Valid initial document
1641            let content1 = r#"[dependencies]
1642serde = "1.0"
1643"#;
1644
1645            let ecosystem = state.ecosystem_registry.get("cargo").unwrap();
1646            let parse_result1 = ecosystem.parse_manifest(content1, &uri).await.unwrap();
1647            let doc_state1 =
1648                DocumentState::new_from_parse_result("cargo", content1.to_string(), parse_result1);
1649            state.update_document(uri.clone(), doc_state1);
1650
1651            // Populate cache
1652            {
1653                let mut doc = state.documents.get_mut(&uri).unwrap();
1654                doc.cached_versions
1655                    .insert("serde".to_string(), "1.0.210".to_string());
1656            }
1657
1658            // Invalid TOML (parse will fail)
1659            let content2 = r#"[dependencies
1660serde = "1.0"
1661"#;
1662
1663            let parse_result2 = ecosystem.parse_manifest(content2, &uri).await.ok();
1664            assert!(
1665                parse_result2.is_none(),
1666                "Parse should fail for invalid TOML"
1667            );
1668
1669            let mut doc_state2 =
1670                DocumentState::new_without_parse_result("cargo", content2.to_string());
1671
1672            if let Some(old_doc) = state.get_document(&uri) {
1673                preserve_cache(&mut doc_state2, &old_doc);
1674            }
1675
1676            state.update_document(uri.clone(), doc_state2);
1677
1678            // Cache should be preserved despite parse failure
1679            let doc = state.get_document(&uri).unwrap();
1680            assert_eq!(
1681                doc.cached_versions.len(),
1682                1,
1683                "Cache should be preserved on parse failure"
1684            );
1685            assert_eq!(
1686                doc.cached_versions.get("serde"),
1687                Some(&"1.0.210".to_string())
1688            );
1689        }
1690
1691        #[test]
1692        fn test_dependency_diff_detects_additions() {
1693            let old: HashSet<String> = ["serde", "tokio"].iter().map(|s| s.to_string()).collect();
1694            let new: HashSet<String> = ["serde", "tokio", "anyhow"]
1695                .iter()
1696                .map(|s| s.to_string())
1697                .collect();
1698
1699            let diff = DependencyDiff::compute(&old, &new);
1700
1701            assert_eq!(diff.added.len(), 1);
1702            assert!(diff.added.contains(&"anyhow".to_string()));
1703            assert!(diff.removed.is_empty());
1704            assert!(diff.needs_fetch());
1705        }
1706
1707        #[test]
1708        fn test_dependency_diff_detects_removals() {
1709            let old: HashSet<String> = ["serde", "tokio", "anyhow"]
1710                .iter()
1711                .map(|s| s.to_string())
1712                .collect();
1713            let new: HashSet<String> = ["serde", "tokio"].iter().map(|s| s.to_string()).collect();
1714
1715            let diff = DependencyDiff::compute(&old, &new);
1716
1717            assert!(diff.added.is_empty());
1718            assert_eq!(diff.removed.len(), 1);
1719            assert!(diff.removed.contains(&"anyhow".to_string()));
1720            assert!(!diff.needs_fetch());
1721        }
1722
1723        #[test]
1724        fn test_dependency_diff_no_changes() {
1725            let old: HashSet<String> = ["serde", "tokio"].iter().map(|s| s.to_string()).collect();
1726            let new: HashSet<String> = ["serde", "tokio"].iter().map(|s| s.to_string()).collect();
1727
1728            let diff = DependencyDiff::compute(&old, &new);
1729
1730            assert!(diff.added.is_empty());
1731            assert!(diff.removed.is_empty());
1732            assert!(!diff.needs_fetch());
1733        }
1734
1735        #[test]
1736        fn test_dependency_diff_empty_to_new() {
1737            let old: HashSet<String> = HashSet::new();
1738            let new: HashSet<String> = ["serde", "tokio"].iter().map(|s| s.to_string()).collect();
1739
1740            let diff = DependencyDiff::compute(&old, &new);
1741
1742            assert_eq!(diff.added.len(), 2);
1743            assert!(diff.removed.is_empty());
1744            assert!(diff.needs_fetch());
1745        }
1746
1747        #[tokio::test]
1748        async fn test_cache_pruned_on_dependency_removal() {
1749            let state = Arc::new(ServerState::new());
1750            let uri = Uri::from_file_path("/test/Cargo.toml").unwrap();
1751
1752            // Initial document with 3 dependencies
1753            let content1 = r#"[dependencies]
1754serde = "1.0"
1755tokio = "1.0"
1756anyhow = "1.0"
1757"#;
1758
1759            let ecosystem = state.ecosystem_registry.get("cargo").unwrap();
1760            let parse_result1 = ecosystem.parse_manifest(content1, &uri).await.unwrap();
1761            let doc_state1 =
1762                DocumentState::new_from_parse_result("cargo", content1.to_string(), parse_result1);
1763            state.update_document(uri.clone(), doc_state1);
1764
1765            // Populate cache for all 3 deps
1766            {
1767                let mut doc = state.documents.get_mut(&uri).unwrap();
1768                doc.cached_versions
1769                    .insert("serde".to_string(), "1.0.210".to_string());
1770                doc.cached_versions
1771                    .insert("tokio".to_string(), "1.40.0".to_string());
1772                doc.cached_versions
1773                    .insert("anyhow".to_string(), "1.0.89".to_string());
1774            }
1775
1776            // Remove anyhow from manifest
1777            let content2 = r#"[dependencies]
1778serde = "1.0"
1779tokio = "1.0"
1780"#;
1781
1782            // Compute diff and apply cache pruning
1783            let old_dep_names: HashSet<String> = ["serde", "tokio", "anyhow"]
1784                .iter()
1785                .map(|s| s.to_string())
1786                .collect();
1787            let new_dep_names: HashSet<String> =
1788                ["serde", "tokio"].iter().map(|s| s.to_string()).collect();
1789            let diff = DependencyDiff::compute(&old_dep_names, &new_dep_names);
1790
1791            let parse_result2 = ecosystem.parse_manifest(content2, &uri).await.unwrap();
1792            let mut doc_state2 =
1793                DocumentState::new_from_parse_result("cargo", content2.to_string(), parse_result2);
1794
1795            if let Some(old_doc) = state.get_document(&uri) {
1796                preserve_cache(&mut doc_state2, &old_doc);
1797            }
1798
1799            // Prune removed dependencies
1800            for removed_dep in &diff.removed {
1801                doc_state2.cached_versions.remove(removed_dep);
1802            }
1803
1804            state.update_document(uri.clone(), doc_state2);
1805
1806            // Verify cache was pruned
1807            let doc = state.get_document(&uri).unwrap();
1808            assert_eq!(
1809                doc.cached_versions.len(),
1810                2,
1811                "anyhow should be removed from cache"
1812            );
1813            assert!(doc.cached_versions.contains_key("serde"));
1814            assert!(doc.cached_versions.contains_key("tokio"));
1815            assert!(!doc.cached_versions.contains_key("anyhow"));
1816        }
1817    }
1818}