1use super::loader::load_document_from_disk;
7use super::state::{DocumentState, ServerState};
8use crate::config::DepsConfig;
9use crate::handlers::diagnostics;
10use crate::progress::RegistryProgress;
11use deps_core::Ecosystem;
12use deps_core::Registry;
13use deps_core::Result;
14use std::collections::{HashMap, HashSet};
15use std::sync::Arc;
16use tokio::sync::RwLock;
17use tokio::task::JoinHandle;
18use tower_lsp_server::Client;
19use tower_lsp_server::ls_types::{MessageType, Uri};
20
21fn preserve_cache(new_state: &mut DocumentState, old_state: &DocumentState) {
24 tracing::trace!(
25 cached = old_state.cached_versions.len(),
26 resolved = old_state.resolved_versions.len(),
27 "preserving version cache"
28 );
29 new_state
30 .cached_versions
31 .clone_from(&old_state.cached_versions);
32 new_state
33 .resolved_versions
34 .clone_from(&old_state.resolved_versions);
35}
36
/// Set difference between two snapshots of a manifest's dependency names.
#[derive(Debug, Clone, Default)]
struct DependencyDiff {
    /// Names present in the new manifest but not the old one.
    added: Vec<String>,
    /// Names dropped by the edit; currently unused outside cache eviction.
    #[allow(dead_code)]
    removed: Vec<String>,
}

impl DependencyDiff {
    /// Compare the old and new dependency-name sets and record what changed.
    fn compute(old_deps: &HashSet<String>, new_deps: &HashSet<String>) -> Self {
        let added = new_deps
            .iter()
            .filter(|name| !old_deps.contains(*name))
            .cloned()
            .collect();
        let removed = old_deps
            .iter()
            .filter(|name| !new_deps.contains(*name))
            .cloned()
            .collect();
        Self { added, removed }
    }

    /// A registry fetch is only needed when at least one dependency was added.
    #[cfg(test)]
    fn needs_fetch(&self) -> bool {
        !self.added.is_empty()
    }
}
58
/// Outcome of a parallel registry fetch.
struct FetchResult {
    /// Latest version string successfully fetched, keyed by package name.
    versions: HashMap<String, String>,
    /// Number of packages whose lookup errored or timed out.
    failed_count: usize,
}
66
67async fn fetch_latest_versions_parallel(
96 registry: Arc<dyn Registry>,
97 package_names: Vec<String>,
98 progress: Option<&RegistryProgress>,
99 timeout_secs: u64,
100 max_concurrent: usize,
101) -> FetchResult {
102 use futures::stream::{self, StreamExt};
103 use std::time::Duration;
104
105 let total = package_names.len();
106 let fetched = Arc::new(std::sync::atomic::AtomicUsize::new(0));
107 let failed = Arc::new(std::sync::atomic::AtomicUsize::new(0));
108 let timeout = Duration::from_secs(timeout_secs);
109
110 let results: Vec<_> = stream::iter(package_names)
111 .map(|name| {
112 let registry = Arc::clone(®istry);
113 let fetched = Arc::clone(&fetched);
114 let failed = Arc::clone(&failed);
115 async move {
116 let result =
117 tokio::time::timeout(timeout, registry.get_latest_matching(&name, "*")).await;
118
119 let version = match result {
120 Ok(Ok(Some(v))) => Some((name.clone(), v.version_string().to_string())),
121 Ok(Ok(None)) => None,
122 Ok(Err(_)) | Err(_) => {
123 failed.fetch_add(1, std::sync::atomic::Ordering::Relaxed);
124 None
125 }
126 };
127
128 let count = fetched.fetch_add(1, std::sync::atomic::Ordering::Relaxed) + 1;
129 if let Some(progress) = progress {
130 progress.update(count, total).await;
131 }
132
133 version
134 }
135 })
136 .buffer_unordered(max_concurrent)
137 .collect()
138 .await;
139
140 FetchResult {
141 versions: results.into_iter().flatten().collect(),
142 failed_count: failed.load(std::sync::atomic::Ordering::Relaxed),
143 }
144}
145
/// Handle `textDocument/didOpen` for a dependency manifest.
///
/// Parses the manifest, stores the resulting [`DocumentState`], and spawns a
/// background task that loads lockfile-resolved versions, fetches the latest
/// version of every dependency from the registry, then refreshes inlay hints
/// and publishes diagnostics on the client. Returns the task's handle so the
/// caller can track or cancel it.
///
/// # Errors
///
/// Returns `DepsError::UnsupportedEcosystem` when no ecosystem handler is
/// registered for `uri`.
pub async fn handle_document_open(
    uri: Uri,
    content: String,
    state: Arc<ServerState>,
    client: Client,
    config: Arc<RwLock<DepsConfig>>,
) -> Result<JoinHandle<()>> {
    let ecosystem = match state.ecosystem_registry.get_for_uri(&uri) {
        Some(e) => e,
        None => {
            tracing::debug!("No ecosystem handler for {:?}", uri);
            return Err(deps_core::error::DepsError::UnsupportedEcosystem(format!(
                "{uri:?}"
            )));
        }
    };

    tracing::info!(
        "Opening {:?} with ecosystem: {}",
        uri,
        ecosystem.display_name()
    );

    // A parse failure is not fatal: the document is still stored (without a
    // parse result) so later edits can recover.
    let parse_result = ecosystem.parse_manifest(&content, &uri).await.ok();

    let doc_state = if let Some(pr) = parse_result {
        DocumentState::new_from_parse_result(ecosystem.id(), content, pr)
    } else {
        tracing::debug!("Failed to parse manifest, storing document without parse result");
        DocumentState::new_without_parse_result(ecosystem.id(), content)
    };

    state.update_document(uri.clone(), doc_state);

    // Snapshot cache settings now so the spawned task never holds the config lock.
    let cache_config = { config.read().await.cache.clone() };

    let uri_clone = uri.clone();
    let state_clone = Arc::clone(&state);
    let ecosystem_clone = Arc::clone(&ecosystem);
    let client_clone = client.clone();

    let task = tokio::spawn(async move {
        // Lockfile-resolved versions come from local disk and are applied
        // first, before any network round-trips complete.
        let resolved_versions =
            load_resolved_versions(&uri_clone, &state_clone, ecosystem_clone.as_ref()).await;

        if !resolved_versions.is_empty()
            && let Some(mut doc) = state_clone.documents.get_mut(&uri_clone)
        {
            doc.update_resolved_versions(resolved_versions.clone());
            doc.update_cached_versions(resolved_versions.clone());
        }

        // Collect dependency names inside a narrow scope so the document
        // read guard is released before the slow fetch below.
        let dep_names: Vec<String> = {
            let doc = match state_clone.get_document(&uri_clone) {
                Some(d) => d,
                None => return,
            };
            let parse_result = match doc.parse_result() {
                Some(p) => p,
                None => return,
            };
            parse_result
                .dependencies()
                .into_iter()
                .map(|d| d.name().to_string())
                .collect()
        };

        if let Some(mut doc) = state_clone.documents.get_mut(&uri_clone) {
            doc.set_loading();
        }

        // Progress reporting is best-effort; `None` if the client rejects it.
        let progress =
            RegistryProgress::start(client_clone.clone(), uri_clone.as_str(), dep_names.len())
                .await
                .ok();
        let registry = ecosystem_clone.registry();
        let fetch_result = fetch_latest_versions_parallel(
            registry,
            dep_names,
            progress.as_ref(),
            cache_config.fetch_timeout_secs,
            cache_config.max_concurrent_fetches,
        )
        .await;

        // "Success" means at least one version came back; a fully failed
        // fetch flips the document into the failed state.
        let success = !fetch_result.versions.is_empty();

        if let Some(mut doc) = state_clone.documents.get_mut(&uri_clone) {
            doc.update_cached_versions(fetch_result.versions);
            if success {
                doc.set_loaded();
            } else {
                doc.set_failed();
            }
        }

        if let Some(progress) = progress {
            progress.end(success).await;
        }

        if fetch_result.failed_count > 0 {
            client_clone
                .show_message(
                    tower_lsp_server::ls_types::MessageType::WARNING,
                    format!(
                        "deps-lsp: {} package(s) failed to fetch (timeout or network error)",
                        fetch_result.failed_count
                    ),
                )
                .await;
        }

        // Not every client implements the refresh request; failure is benign.
        if let Err(e) = client_clone.inlay_hint_refresh().await {
            tracing::debug!("inlay_hint_refresh not supported: {:?}", e);
        }

        let diags =
            diagnostics::generate_diagnostics_internal(Arc::clone(&state_clone), &uri_clone).await;

        client_clone
            .publish_diagnostics(uri_clone.clone(), diags, None)
            .await;
    });

    Ok(task)
}
295
/// Handle `textDocument/didChange` for a dependency manifest.
///
/// Re-parses the manifest, diffs the dependency set against the previous
/// snapshot, preserves already-fetched version caches, and spawns a
/// background task that refreshes lockfile-resolved versions and fetches
/// registry versions only for newly *added* dependencies.
///
/// # Errors
///
/// Returns `DepsError::UnsupportedEcosystem` when no ecosystem handler is
/// registered for `uri`.
pub async fn handle_document_change(
    uri: Uri,
    content: String,
    state: Arc<ServerState>,
    client: Client,
    config: Arc<RwLock<DepsConfig>>,
) -> Result<JoinHandle<()>> {
    let ecosystem = match state.ecosystem_registry.get_for_uri(&uri) {
        Some(e) => e,
        None => {
            tracing::debug!("No ecosystem handler for {:?}", uri);
            return Err(deps_core::error::DepsError::UnsupportedEcosystem(format!(
                "{uri:?}"
            )));
        }
    };

    // Dependency names from the previous snapshot (empty when the document
    // is unknown or previously failed to parse).
    let old_dep_names: HashSet<String> =
        state.get_document(&uri).map_or_else(HashSet::new, |doc| {
            doc.parse_result()
                .map(|pr| {
                    pr.dependencies()
                        .into_iter()
                        .map(|d| d.name().to_string())
                        .collect()
                })
                .unwrap_or_default()
        });

    let parse_result = ecosystem.parse_manifest(&content, &uri).await.ok();

    let new_dep_names: HashSet<String> = parse_result
        .as_ref()
        .map(|pr| {
            pr.dependencies()
                .into_iter()
                .map(|d| d.name().to_string())
                .collect()
        })
        .unwrap_or_default();

    // Added names trigger registry fetches below; removed names get their
    // cache entries evicted.
    let diff = DependencyDiff::compute(&old_dep_names, &new_dep_names);
    tracing::debug!(
        added = diff.added.len(),
        removed = diff.removed.len(),
        "dependency diff"
    );

    let mut doc_state = if let Some(pr) = parse_result {
        DocumentState::new_from_parse_result(ecosystem.id(), content, pr)
    } else {
        tracing::debug!("Failed to parse manifest, storing document without parse result");
        DocumentState::new_without_parse_result(ecosystem.id(), content)
    };

    // Carry over previously fetched/resolved versions from the old snapshot.
    if let Some(old_doc) = state.get_document(&uri) {
        preserve_cache(&mut doc_state, &old_doc);
    }

    for removed_dep in &diff.removed {
        doc_state.cached_versions.remove(removed_dep);
        doc_state.resolved_versions.remove(removed_dep);
    }

    state.update_document(uri.clone(), doc_state);

    // Snapshot cache settings so the spawned task never holds the config lock.
    let cache_config = { config.read().await.cache.clone() };

    let uri_clone = uri.clone();
    let state_clone = Arc::clone(&state);
    let ecosystem_clone = Arc::clone(&ecosystem);
    let client_clone = client.clone();
    let deps_to_fetch = diff.added;

    let task = tokio::spawn(async move {
        // 100 ms delay before doing any work — presumably debouncing rapid
        // consecutive edits (TODO confirm against the caller's cancellation).
        tokio::time::sleep(tokio::time::Duration::from_millis(100)).await;

        let resolved_versions =
            load_resolved_versions(&uri_clone, &state_clone, ecosystem_clone.as_ref()).await;

        if !resolved_versions.is_empty()
            && let Some(mut doc) = state_clone.documents.get_mut(&uri_clone)
        {
            doc.update_resolved_versions(resolved_versions.clone());
        }

        // Nothing new to fetch: refresh the UI from cache and exit early.
        if deps_to_fetch.is_empty() {
            tracing::debug!("no new dependencies, skipping registry fetch");

            if let Some(mut doc) = state_clone.documents.get_mut(&uri_clone) {
                doc.set_loaded();
            }

            if let Err(e) = client_clone.inlay_hint_refresh().await {
                tracing::debug!("inlay_hint_refresh not supported: {:?}", e);
            }

            let diags =
                diagnostics::generate_diagnostics_internal(Arc::clone(&state_clone), &uri_clone)
                    .await;
            client_clone
                .publish_diagnostics(uri_clone.clone(), diags, None)
                .await;
            return;
        }

        tracing::info!(
            count = deps_to_fetch.len(),
            "fetching versions for new dependencies"
        );

        if let Some(mut doc) = state_clone.documents.get_mut(&uri_clone) {
            doc.set_loading();
        }

        // Progress reporting is best-effort; `None` if the client rejects it.
        let progress = RegistryProgress::start(
            client_clone.clone(),
            uri_clone.as_str(),
            deps_to_fetch.len(),
        )
        .await
        .ok();

        let registry = ecosystem_clone.registry();
        let fetch_result = fetch_latest_versions_parallel(
            registry,
            deps_to_fetch,
            progress.as_ref(),
            cache_config.fetch_timeout_secs,
            cache_config.max_concurrent_fetches,
        )
        .await;

        let success = !fetch_result.versions.is_empty();

        // Merge (not replace): cached entries for unchanged deps are kept.
        if let Some(mut doc) = state_clone.documents.get_mut(&uri_clone) {
            for (name, version) in fetch_result.versions {
                doc.cached_versions.insert(name, version);
            }
            if success {
                doc.set_loaded();
            } else {
                doc.set_failed();
            }
        }

        if let Some(progress) = progress {
            progress.end(success).await;
        }

        if fetch_result.failed_count > 0 {
            client_clone
                .show_message(
                    tower_lsp_server::ls_types::MessageType::WARNING,
                    format!(
                        "deps-lsp: {} package(s) failed to fetch (timeout or network error)",
                        fetch_result.failed_count
                    ),
                )
                .await;
        }

        // Not every client implements the refresh request; failure is benign.
        if let Err(e) = client_clone.inlay_hint_refresh().await {
            tracing::debug!("inlay_hint_refresh not supported: {:?}", e);
        }

        let diags =
            diagnostics::generate_diagnostics_internal(Arc::clone(&state_clone), &uri_clone).await;

        client_clone
            .publish_diagnostics(uri_clone.clone(), diags, None)
            .await;
    });

    Ok(task)
}
493
494async fn load_resolved_versions(
500 uri: &Uri,
501 state: &ServerState,
502 ecosystem: &dyn Ecosystem,
503) -> HashMap<String, String> {
504 let lock_provider = match ecosystem.lockfile_provider() {
505 Some(p) => p,
506 None => {
507 tracing::debug!("No lock file provider for ecosystem {}", ecosystem.id());
508 return HashMap::new();
509 }
510 };
511
512 let lockfile_path = match lock_provider.locate_lockfile(uri) {
513 Some(path) => path,
514 None => {
515 tracing::debug!("No lock file found for {:?}", uri);
516 return HashMap::new();
517 }
518 };
519
520 match state
521 .lockfile_cache
522 .get_or_parse(lock_provider.as_ref(), &lockfile_path)
523 .await
524 {
525 Ok(resolved) => {
526 tracing::info!(
527 "Loaded {} resolved versions from {}",
528 resolved.len(),
529 lockfile_path.display()
530 );
531 resolved
532 .iter()
533 .map(|(name, pkg)| (name.clone(), pkg.version.clone()))
534 .collect()
535 }
536 Err(e) => {
537 tracing::warn!("Failed to parse lock file: {}", e);
538 HashMap::new()
539 }
540 }
541}
542
543pub async fn ensure_document_loaded(
590 uri: &Uri,
591 state: Arc<ServerState>,
592 client: Client,
593 config: Arc<RwLock<DepsConfig>>,
594) -> bool {
595 if state.get_document(uri).is_some() {
597 tracing::debug!("Document already loaded: {:?}", uri);
598 return true;
599 }
600
601 let cold_start_config = { config.read().await.cold_start.clone() };
603
604 if !cold_start_config.enabled {
606 tracing::debug!("Cold start disabled via configuration");
607 return false;
608 }
609
610 if !state.cold_start_limiter.allow_cold_start(uri) {
612 tracing::warn!("Cold start rate limited: {:?}", uri);
613 return false;
614 }
615
616 if state.ecosystem_registry.get_for_uri(uri).is_none() {
618 tracing::debug!("Unsupported file type: {:?}", uri);
619 return false;
620 }
621
622 tracing::info!("Loading document from disk (cold start): {:?}", uri);
624 let content = match load_document_from_disk(uri).await {
625 Ok(c) => c,
626 Err(e) => {
627 tracing::warn!("Failed to load document {:?}: {}", uri, e);
628 client
629 .log_message(MessageType::WARNING, format!("Could not load file: {e}"))
630 .await;
631 return false;
632 }
633 };
634
635 match handle_document_open(
637 uri.clone(),
638 content,
639 Arc::clone(&state),
640 client.clone(),
641 Arc::clone(&config),
642 )
643 .await
644 {
645 Ok(task) => {
646 state.spawn_background_task(uri.clone(), task).await;
647 tracing::info!("Document loaded successfully from disk: {:?}", uri);
648 true
649 }
650 Err(e) => {
651 tracing::warn!("Failed to process loaded document {:?}: {}", uri, e);
652 false
653 }
654 }
655}
656
657#[cfg(test)]
658mod tests {
659 use super::*;
660
661 #[test]
664 fn test_ecosystem_registry_unknown_file() {
665 let state = ServerState::new();
666 let unknown_uri =
667 tower_lsp_server::ls_types::Uri::from_file_path("/test/unknown.txt").unwrap();
668 assert!(state.ecosystem_registry.get_for_uri(&unknown_uri).is_none());
669 }
670
671 #[tokio::test]
672 async fn test_ensure_document_loaded_unsupported_file_check() {
673 let state = Arc::new(ServerState::new());
675 let uri = Uri::from_file_path("/test/README.md").unwrap();
676
677 assert!(
679 state.ecosystem_registry.get_for_uri(&uri).is_none(),
680 "README.md should not have an ecosystem handler"
681 );
682
683 }
686
687 #[tokio::test]
688 async fn test_ensure_document_loaded_file_not_found_check() {
689 use super::load_document_from_disk;
691
692 let uri = Uri::from_file_path("/nonexistent/Cargo.toml").unwrap();
693 let result = load_document_from_disk(&uri).await;
694
695 assert!(result.is_err(), "Should fail for missing files");
696
697 }
699
    /// A registry slower than the timeout yields no version and one counted
    /// failure.
    #[tokio::test]
    async fn test_fetch_latest_versions_parallel_with_timeout() {
        use async_trait::async_trait;
        use deps_core::{Metadata, Registry, Version};
        use std::any::Any;
        use std::time::Duration;

        /// Mock registry whose lookups always sleep past the fetch timeout.
        struct TimeoutRegistry;

        #[async_trait]
        impl Registry for TimeoutRegistry {
            async fn get_versions(&self, _name: &str) -> deps_core::Result<Vec<Box<dyn Version>>> {
                tokio::time::sleep(Duration::from_secs(10)).await;
                Ok(vec![])
            }

            async fn get_latest_matching(
                &self,
                _name: &str,
                _req: &str,
            ) -> deps_core::Result<Option<Box<dyn Version>>> {
                tokio::time::sleep(Duration::from_secs(10)).await;
                Ok(None)
            }

            async fn search(
                &self,
                _query: &str,
                _limit: usize,
            ) -> deps_core::Result<Vec<Box<dyn Metadata>>> {
                Ok(vec![])
            }

            fn package_url(&self, name: &str) -> String {
                format!("https://example.com/{}", name)
            }

            fn as_any(&self) -> &dyn Any {
                self
            }
        }

        let registry: Arc<dyn Registry> = Arc::new(TimeoutRegistry);
        let packages = vec!["slow-package".to_string()];

        // 1-second timeout, up to 10 concurrent fetches.
        let result = fetch_latest_versions_parallel(registry, packages, None, 1, 10).await;

        assert!(result.versions.is_empty(), "Slow package should timeout");
        assert_eq!(result.failed_count, 1, "Should track 1 failed package");
    }
755
    /// One slow package must not delay completion for the fast one; the slow
    /// lookup times out independently while the rest finish.
    #[tokio::test]
    async fn test_fetch_latest_versions_parallel_fast_packages_not_blocked() {
        use async_trait::async_trait;
        use deps_core::{Metadata, Registry, Version};
        use std::any::Any;
        use std::time::Duration;

        /// Mock registry that stalls only for the package named "slow-package".
        struct MixedRegistry;

        #[async_trait]
        impl Registry for MixedRegistry {
            async fn get_versions(&self, name: &str) -> deps_core::Result<Vec<Box<dyn Version>>> {
                if name == "slow-package" {
                    tokio::time::sleep(Duration::from_secs(10)).await;
                }
                Ok(vec![])
            }

            async fn get_latest_matching(
                &self,
                name: &str,
                _req: &str,
            ) -> deps_core::Result<Option<Box<dyn Version>>> {
                if name == "slow-package" {
                    tokio::time::sleep(Duration::from_secs(10)).await;
                }
                Ok(None)
            }

            async fn search(
                &self,
                _query: &str,
                _limit: usize,
            ) -> deps_core::Result<Vec<Box<dyn Metadata>>> {
                Ok(vec![])
            }

            fn package_url(&self, name: &str) -> String {
                format!("https://example.com/{}", name)
            }

            fn as_any(&self) -> &dyn Any {
                self
            }
        }

        let registry: Arc<dyn Registry> = Arc::new(MixedRegistry);
        let packages = vec!["slow-package".to_string(), "fast-package".to_string()];

        let start = std::time::Instant::now();
        // 1-second timeout, so the slow package is cut off at ~1 s.
        let result = fetch_latest_versions_parallel(registry, packages, None, 1, 10).await;
        let elapsed = start.elapsed();

        assert!(
            elapsed < Duration::from_secs(3),
            "Should not wait for slow package: {:?}",
            elapsed
        );

        assert!(
            result.versions.is_empty(),
            "No versions returned (test registry returns empty)"
        );
        assert_eq!(
            result.failed_count, 1,
            "Slow package should be marked as failed"
        );
    }
831
832 #[tokio::test]
833 async fn test_fetch_latest_versions_parallel_concurrency_limit() {
834 use async_trait::async_trait;
835 use deps_core::{Metadata, Registry, Version};
836 use std::any::Any;
837 use std::sync::atomic::{AtomicUsize, Ordering};
838 use std::time::Duration;
839
840 struct ConcurrencyTrackingRegistry {
842 current: Arc<AtomicUsize>,
843 max_seen: Arc<AtomicUsize>,
844 }
845
846 #[async_trait]
847 impl Registry for ConcurrencyTrackingRegistry {
848 async fn get_versions(&self, _name: &str) -> deps_core::Result<Vec<Box<dyn Version>>> {
849 let current = self.current.fetch_add(1, Ordering::SeqCst) + 1;
851
852 self.max_seen.fetch_max(current, Ordering::SeqCst);
854
855 tokio::time::sleep(Duration::from_millis(50)).await;
857
858 self.current.fetch_sub(1, Ordering::SeqCst);
860
861 Ok(vec![])
862 }
863
864 async fn get_latest_matching(
865 &self,
866 _name: &str,
867 _req: &str,
868 ) -> deps_core::Result<Option<Box<dyn Version>>> {
869 let current = self.current.fetch_add(1, Ordering::SeqCst) + 1;
871
872 self.max_seen.fetch_max(current, Ordering::SeqCst);
874
875 tokio::time::sleep(Duration::from_millis(50)).await;
877
878 self.current.fetch_sub(1, Ordering::SeqCst);
880
881 Ok(None)
882 }
883
884 async fn search(
885 &self,
886 _query: &str,
887 _limit: usize,
888 ) -> deps_core::Result<Vec<Box<dyn Metadata>>> {
889 Ok(vec![])
890 }
891
892 fn package_url(&self, name: &str) -> String {
893 format!("https://example.com/{}", name)
894 }
895
896 fn as_any(&self) -> &dyn Any {
897 self
898 }
899 }
900
901 let current = Arc::new(AtomicUsize::new(0));
902 let max_seen = Arc::new(AtomicUsize::new(0));
903
904 let registry: Arc<dyn Registry> = Arc::new(ConcurrencyTrackingRegistry {
905 current: Arc::clone(¤t),
906 max_seen: Arc::clone(&max_seen),
907 });
908
909 let packages: Vec<String> = (0..50).map(|i| format!("package-{}", i)).collect();
911
912 fetch_latest_versions_parallel(registry, packages, None, 5, 20).await;
913
914 let max = max_seen.load(Ordering::SeqCst);
916 assert!(
917 max <= 22,
918 "Concurrency limit violated: {} concurrent requests (limit: 20)",
919 max
920 );
921 }
922
    /// One fast success, one timeout, one hard error: the fetch must return
    /// the partial result rather than failing wholesale.
    #[tokio::test]
    async fn test_fetch_partial_success_with_mixed_outcomes() {
        use async_trait::async_trait;
        use deps_core::{Metadata, Registry, Version};
        use std::any::Any;
        use std::time::Duration;

        /// Minimal `Version` implementation wrapping a fixed version string.
        #[derive(Debug)]
        struct MockVersion {
            version: String,
        }

        impl Version for MockVersion {
            fn version_string(&self) -> &str {
                &self.version
            }

            fn is_prerelease(&self) -> bool {
                false
            }

            fn is_yanked(&self) -> bool {
                false
            }

            fn as_any(&self) -> &dyn Any {
                self
            }
        }

        /// Mock registry whose behavior depends on the package name:
        /// immediate success, slow (times out), or immediate error.
        struct MixedOutcomeRegistry;

        #[async_trait]
        impl Registry for MixedOutcomeRegistry {
            async fn get_versions(&self, name: &str) -> deps_core::Result<Vec<Box<dyn Version>>> {
                match name {
                    "package-fast" => {
                        Ok(vec![Box::new(MockVersion {
                            version: "1.0.0".to_string(),
                        })])
                    }
                    "package-slow" => {
                        tokio::time::sleep(Duration::from_secs(10)).await;
                        Ok(vec![])
                    }
                    "package-error" => {
                        Err(deps_core::error::DepsError::CacheError(
                            "Mock registry error".to_string(),
                        ))
                    }
                    _ => Ok(vec![]),
                }
            }

            async fn get_latest_matching(
                &self,
                name: &str,
                _req: &str,
            ) -> deps_core::Result<Option<Box<dyn Version>>> {
                match name {
                    "package-fast" => Ok(Some(Box::new(MockVersion {
                        version: "1.0.0".to_string(),
                    }))),
                    "package-slow" => {
                        tokio::time::sleep(Duration::from_secs(10)).await;
                        Ok(None)
                    }
                    "package-error" => Err(deps_core::error::DepsError::CacheError(
                        "Mock registry error".to_string(),
                    )),
                    _ => Ok(None),
                }
            }

            async fn search(
                &self,
                _query: &str,
                _limit: usize,
            ) -> deps_core::Result<Vec<Box<dyn Metadata>>> {
                Ok(vec![])
            }

            fn package_url(&self, name: &str) -> String {
                format!("https://example.com/{}", name)
            }

            fn as_any(&self) -> &dyn Any {
                self
            }
        }

        let registry: Arc<dyn Registry> = Arc::new(MixedOutcomeRegistry);
        let packages = vec![
            "package-fast".to_string(),
            "package-slow".to_string(),
            "package-error".to_string(),
        ];

        // 1-second timeout so "package-slow" is cut off quickly.
        let result = fetch_latest_versions_parallel(registry, packages, None, 1, 10).await;

        assert_eq!(
            result.versions.len(),
            1,
            "Should have exactly 1 successful package"
        );
        assert_eq!(
            result.versions.get("package-fast"),
            Some(&"1.0.0".to_string()),
            "Fast package should have correct version"
        );
        assert!(
            !result.versions.contains_key("package-slow"),
            "Slow package should not be in results (timeout)"
        );
        assert!(
            !result.versions.contains_key("package-error"),
            "Error package should not be in results"
        );
    }
1052
    /// When every lookup errors, the result is empty and all packages are
    /// counted as failed — no panic, no early abort.
    #[tokio::test]
    async fn test_fetch_registry_error_handled() {
        use async_trait::async_trait;
        use deps_core::{Metadata, Registry, Version};
        use std::any::Any;

        /// Mock registry whose version lookups always fail.
        struct ErrorRegistry;

        #[async_trait]
        impl Registry for ErrorRegistry {
            async fn get_versions(&self, name: &str) -> deps_core::Result<Vec<Box<dyn Version>>> {
                Err(deps_core::error::DepsError::CacheError(format!(
                    "Failed to fetch package: {}",
                    name
                )))
            }

            async fn get_latest_matching(
                &self,
                name: &str,
                _req: &str,
            ) -> deps_core::Result<Option<Box<dyn Version>>> {
                Err(deps_core::error::DepsError::CacheError(format!(
                    "Failed to fetch package: {}",
                    name
                )))
            }

            async fn search(
                &self,
                _query: &str,
                _limit: usize,
            ) -> deps_core::Result<Vec<Box<dyn Metadata>>> {
                Ok(vec![])
            }

            fn package_url(&self, name: &str) -> String {
                format!("https://example.com/{}", name)
            }

            fn as_any(&self) -> &dyn Any {
                self
            }
        }

        let registry: Arc<dyn Registry> = Arc::new(ErrorRegistry);
        let packages = vec![
            "package-1".to_string(),
            "package-2".to_string(),
            "package-3".to_string(),
        ];

        let result = fetch_latest_versions_parallel(registry, packages, None, 5, 10).await;

        assert!(
            result.versions.is_empty(),
            "All packages with errors should be omitted from results"
        );
        assert_eq!(
            result.failed_count, 3,
            "All 3 packages should be marked as failed"
        );
    }
1119
    /// Cargo-specific handler tests (compiled only with the `cargo` feature).
    #[cfg(feature = "cargo")]
    mod cargo_tests {
        use super::*;

        /// `Cargo.toml` resolves to a registered ecosystem handler.
        #[test]
        fn test_ecosystem_registry_lookup() {
            let state = ServerState::new();
            let cargo_uri =
                tower_lsp_server::ls_types::Uri::from_file_path("/test/Cargo.toml").unwrap();
            assert!(state.ecosystem_registry.get_for_uri(&cargo_uri).is_some());
        }

        /// A valid manifest parses and is stored with its parse result.
        #[tokio::test]
        async fn test_document_parsing() {
            let state = Arc::new(ServerState::new());
            let uri = tower_lsp_server::ls_types::Uri::from_file_path("/test/Cargo.toml").unwrap();
            let content = r#"[dependencies]
serde = "1.0"
"#;

            let ecosystem = state
                .ecosystem_registry
                .get_for_uri(&uri)
                .expect("Cargo ecosystem not found");

            let parse_result = ecosystem.parse_manifest(content, &uri).await;
            assert!(parse_result.is_ok());

            let doc_state = DocumentState::new_from_parse_result(
                "cargo",
                content.to_string(),
                parse_result.unwrap(),
            );
            state.update_document(uri.clone(), doc_state);

            assert_eq!(state.document_count(), 1);
            let doc = state.get_document(&uri).unwrap();
            assert_eq!(doc.ecosystem_id, "cargo");
        }

        /// Invalid TOML still produces a stored document (without a parse
        /// result) so later edits can recover.
        #[tokio::test]
        async fn test_document_stored_even_when_parsing_fails() {
            let state = Arc::new(ServerState::new());
            let uri = tower_lsp_server::ls_types::Uri::from_file_path("/test/Cargo.toml").unwrap();
            // Note the missing `]` — intentionally invalid TOML.
            let content = r#"[dependencies
serde = "1.0"
"#;

            let ecosystem = state
                .ecosystem_registry
                .get_for_uri(&uri)
                .expect("Cargo ecosystem not found");

            let parse_result = ecosystem.parse_manifest(content, &uri).await.ok();
            assert!(
                parse_result.is_none(),
                "Parsing should fail for invalid TOML"
            );

            // Mirrors the fallback path in `handle_document_open`.
            let doc_state = if let Some(pr) = parse_result {
                DocumentState::new_from_parse_result("cargo", content.to_string(), pr)
            } else {
                DocumentState::new_without_parse_result("cargo", content.to_string())
            };

            state.update_document(uri.clone(), doc_state);

            let doc = state.get_document(&uri);
            assert!(
                doc.is_some(),
                "Document should be stored even when parsing fails"
            );

            let doc = doc.unwrap();
            assert_eq!(doc.ecosystem_id, "cargo");
            assert_eq!(doc.content, content);
            assert!(
                doc.parse_result().is_none(),
                "Parse result should be None for failed parse"
            );
        }

        /// A document already present in state satisfies the fast path of
        /// `ensure_document_loaded`.
        #[tokio::test]
        async fn test_ensure_document_loaded_fast_path() {
            let state = Arc::new(ServerState::new());
            let uri = Uri::from_file_path("/test/Cargo.toml").unwrap();
            let content = r#"[dependencies]
serde = "1.0""#;

            let ecosystem = state
                .ecosystem_registry
                .get_for_uri(&uri)
                .expect("Cargo ecosystem");
            let parse_result = ecosystem.parse_manifest(content, &uri).await.unwrap();
            let doc_state =
                DocumentState::new_from_parse_result("cargo", content.to_string(), parse_result);
            state.update_document(uri.clone(), doc_state);

            assert!(
                state.get_document(&uri).is_some(),
                "Document should exist in state"
            );
            assert_eq!(state.document_count(), 1, "Document count should be 1");
        }

        /// Reading a real manifest from a temp dir and parsing it succeeds.
        #[tokio::test]
        async fn test_ensure_document_loaded_successful_disk_load() {
            use super::super::load_document_from_disk;
            use std::fs;
            use tempfile::TempDir;

            let temp_dir = TempDir::new().unwrap();
            let cargo_toml_path = temp_dir.path().join("Cargo.toml");
            let content = r#"[package]
name = "test"
version = "0.1.0"

[dependencies]
serde = "1.0"
"#;
            fs::write(&cargo_toml_path, content).unwrap();

            let uri = Uri::from_file_path(&cargo_toml_path).unwrap();

            let loaded_content = load_document_from_disk(&uri).await.unwrap();
            assert_eq!(loaded_content, content);

            let state = Arc::new(ServerState::new());
            let ecosystem = state
                .ecosystem_registry
                .get_for_uri(&uri)
                .expect("Cargo ecosystem");
            let parse_result = ecosystem.parse_manifest(&loaded_content, &uri).await;
            assert!(parse_result.is_ok(), "Should parse successfully");
        }

        /// Storing the same URI twice keeps a single document entry.
        #[tokio::test]
        async fn test_ensure_document_loaded_idempotent_check() {
            let state = Arc::new(ServerState::new());
            let uri = Uri::from_file_path("/test/Cargo.toml").unwrap();
            let content = r#"[dependencies]
serde = "1.0""#;

            let ecosystem = state
                .ecosystem_registry
                .get_for_uri(&uri)
                .expect("Cargo ecosystem");

            let parse_result1 = ecosystem.parse_manifest(content, &uri).await.unwrap();
            let parse_result2 = ecosystem.parse_manifest(content, &uri).await.unwrap();

            let doc_state1 =
                DocumentState::new_from_parse_result("cargo", content.to_string(), parse_result1);
            state.update_document(uri.clone(), doc_state1);
            assert_eq!(state.document_count(), 1);

            let doc_state2 =
                DocumentState::new_from_parse_result("cargo", content.to_string(), parse_result2);
            state.update_document(uri.clone(), doc_state2);
            assert_eq!(
                state.document_count(),
                1,
                "Should still have only 1 document"
            );
        }
    }
1308
    /// npm-specific handler tests (compiled only with the `npm` feature).
    #[cfg(feature = "npm")]
    mod npm_tests {
        use super::*;

        /// `package.json` resolves to a registered ecosystem handler.
        #[test]
        fn test_ecosystem_registry_lookup() {
            let state = ServerState::new();
            let npm_uri =
                tower_lsp_server::ls_types::Uri::from_file_path("/test/package.json").unwrap();
            assert!(state.ecosystem_registry.get_for_uri(&npm_uri).is_some());
        }

        /// A valid package.json parses and is stored with its parse result.
        #[tokio::test]
        async fn test_document_parsing() {
            let state = Arc::new(ServerState::new());
            let uri =
                tower_lsp_server::ls_types::Uri::from_file_path("/test/package.json").unwrap();
            let content = r#"{"dependencies": {"express": "^4.18.0"}}"#;

            let ecosystem = state
                .ecosystem_registry
                .get_for_uri(&uri)
                .expect("npm ecosystem not found");

            let parse_result = ecosystem.parse_manifest(content, &uri).await;
            assert!(parse_result.is_ok());

            let doc_state = DocumentState::new_from_parse_result(
                "npm",
                content.to_string(),
                parse_result.unwrap(),
            );
            state.update_document(uri.clone(), doc_state);

            let doc = state.get_document(&uri).unwrap();
            assert_eq!(doc.ecosystem_id, "npm");
        }
    }
1348
    /// PyPI-specific handler tests (compiled only with the `pypi` feature).
    #[cfg(feature = "pypi")]
    mod pypi_tests {
        use super::*;

        /// `pyproject.toml` resolves to a registered ecosystem handler.
        #[test]
        fn test_ecosystem_registry_lookup() {
            let state = ServerState::new();
            let pypi_uri =
                tower_lsp_server::ls_types::Uri::from_file_path("/test/pyproject.toml").unwrap();
            assert!(state.ecosystem_registry.get_for_uri(&pypi_uri).is_some());
        }

        /// A valid pyproject.toml parses and is stored with its parse result.
        #[tokio::test]
        async fn test_document_parsing() {
            let state = Arc::new(ServerState::new());
            let uri =
                tower_lsp_server::ls_types::Uri::from_file_path("/test/pyproject.toml").unwrap();
            let content = r#"[project]
dependencies = ["requests>=2.0.0"]
"#;

            let ecosystem = state
                .ecosystem_registry
                .get_for_uri(&uri)
                .expect("pypi ecosystem not found");

            let parse_result = ecosystem.parse_manifest(content, &uri).await;
            assert!(parse_result.is_ok());

            let doc_state = DocumentState::new_from_parse_result(
                "pypi",
                content.to_string(),
                parse_result.unwrap(),
            );
            state.update_document(uri.clone(), doc_state);

            let doc = state.get_document(&uri).unwrap();
            assert_eq!(doc.ecosystem_id, "pypi");
        }
    }
1390
1391 #[cfg(feature = "go")]
1393 mod go_tests {
1394 use super::*;
1395
1396 #[test]
1397 fn test_ecosystem_registry_lookup() {
1398 let state = ServerState::new();
1399 let go_uri = tower_lsp_server::ls_types::Uri::from_file_path("/test/go.mod").unwrap();
1400 assert!(state.ecosystem_registry.get_for_uri(&go_uri).is_some());
1401 }
1402
1403 #[tokio::test]
1404 async fn test_document_parsing() {
1405 let state = Arc::new(ServerState::new());
1406 let uri = tower_lsp_server::ls_types::Uri::from_file_path("/test/go.mod").unwrap();
1407 let content = r"module example.com/mymodule
1408
1409go 1.21
1410
1411require github.com/gorilla/mux v1.8.0
1412";
1413
1414 let ecosystem = state
1415 .ecosystem_registry
1416 .get_for_uri(&uri)
1417 .expect("go ecosystem not found");
1418
1419 let parse_result = ecosystem.parse_manifest(content, &uri).await;
1420 assert!(parse_result.is_ok());
1421
1422 let doc_state = DocumentState::new_from_parse_result(
1423 "go",
1424 content.to_string(),
1425 parse_result.unwrap(),
1426 );
1427 state.update_document(uri.clone(), doc_state);
1428
1429 let doc = state.get_document(&uri).unwrap();
1430 assert_eq!(doc.ecosystem_id, "go");
1431 }
1432 }
1433
    /// Tests for incremental fetch behavior: version caches survive edits
    /// (including parse failures), are pruned when a dependency is removed,
    /// and `DependencyDiff` correctly reports what needs fetching.
    #[cfg(feature = "cargo")]
    mod incremental_fetch_tests {
        use super::*;

        /// Editing version requirements (same dependency set) must carry
        /// both `cached_versions` and `resolved_versions` into the new
        /// `DocumentState` via `preserve_cache`.
        #[tokio::test]
        async fn test_preserve_cached_versions_on_change() {
            let state = Arc::new(ServerState::new());
            let uri = Uri::from_file_path("/test/Cargo.toml").unwrap();

            let content1 = r#"[dependencies]
serde = "1.0"
tokio = "1.0"
"#;

            let ecosystem = state.ecosystem_registry.get("cargo").unwrap();
            let parse_result1 = ecosystem.parse_manifest(content1, &uri).await.unwrap();
            let doc_state1 =
                DocumentState::new_from_parse_result("cargo", content1.to_string(), parse_result1);
            state.update_document(uri.clone(), doc_state1);

            // Simulate earlier registry fetches by populating both caches.
            // Scoped block: the mutable guard from `documents.get_mut` is
            // dropped before `documents` is accessed again below (presumably
            // a DashMap-style guard — holding it across another map access
            // could deadlock; keep the scope as-is).
            {
                let mut doc = state.documents.get_mut(&uri).unwrap();
                doc.cached_versions
                    .insert("serde".to_string(), "1.0.210".to_string());
                doc.cached_versions
                    .insert("tokio".to_string(), "1.40.0".to_string());
                doc.resolved_versions
                    .insert("serde".to_string(), "1.0.195".to_string());
                doc.resolved_versions
                    .insert("tokio".to_string(), "1.35.0".to_string());
            }

            // Sanity check: both caches were populated.
            {
                let doc = state.get_document(&uri).unwrap();
                assert_eq!(doc.cached_versions.len(), 2);
                assert_eq!(doc.resolved_versions.len(), 2);
            }

            // Same dependency set; only serde's version requirement changed.
            let content2 = r#"[dependencies]
serde = "1.0.210"
tokio = "1.0"
"#;

            let parse_result2 = ecosystem.parse_manifest(content2, &uri).await.unwrap();
            let mut doc_state2 =
                DocumentState::new_from_parse_result("cargo", content2.to_string(), parse_result2);

            // Carry caches over before the fresh state replaces the old one.
            // The `old_doc` guard is dropped at the end of the `if let`
            // block, before `update_document` touches the map.
            if let Some(old_doc) = state.get_document(&uri) {
                preserve_cache(&mut doc_state2, &old_doc);
            }

            state.update_document(uri.clone(), doc_state2);

            // Both caches must survive the document replacement intact.
            {
                let doc = state.get_document(&uri).unwrap();
                assert_eq!(
                    doc.cached_versions.len(),
                    2,
                    "Cached versions should be preserved"
                );
                assert_eq!(
                    doc.cached_versions.get("serde"),
                    Some(&"1.0.210".to_string()),
                    "serde cache preserved"
                );
                assert_eq!(
                    doc.cached_versions.get("tokio"),
                    Some(&"1.40.0".to_string()),
                    "tokio cache preserved"
                );
                assert_eq!(
                    doc.resolved_versions.len(),
                    2,
                    "Resolved versions should be preserved"
                );
            }
        }

        /// A freshly opened document starts with no cached versions.
        #[tokio::test]
        async fn test_first_open_has_empty_cache() {
            let state = Arc::new(ServerState::new());
            let uri = Uri::from_file_path("/test/Cargo.toml").unwrap();

            let content = r#"[dependencies]
serde = "1.0"
"#;

            let ecosystem = state.ecosystem_registry.get("cargo").unwrap();
            let parse_result = ecosystem.parse_manifest(content, &uri).await.unwrap();
            let doc_state =
                DocumentState::new_from_parse_result("cargo", content.to_string(), parse_result);
            state.update_document(uri.clone(), doc_state);

            let doc = state.get_document(&uri).unwrap();
            assert_eq!(
                doc.cached_versions.len(),
                0,
                "First open should have empty cache"
            );
        }

        /// When a subsequent edit fails to parse, the previous cache is
        /// still carried into the (parse-less) replacement state, so a
        /// transiently invalid manifest does not discard fetched versions.
        #[tokio::test]
        async fn test_preserve_cache_on_parse_failure() {
            let state = Arc::new(ServerState::new());
            let uri = Uri::from_file_path("/test/Cargo.toml").unwrap();

            let content1 = r#"[dependencies]
serde = "1.0"
"#;

            let ecosystem = state.ecosystem_registry.get("cargo").unwrap();
            let parse_result1 = ecosystem.parse_manifest(content1, &uri).await.unwrap();
            let doc_state1 =
                DocumentState::new_from_parse_result("cargo", content1.to_string(), parse_result1);
            state.update_document(uri.clone(), doc_state1);

            // Seed the cache; scoped so the mutable guard is dropped before
            // the map is read again below.
            {
                let mut doc = state.documents.get_mut(&uri).unwrap();
                doc.cached_versions
                    .insert("serde".to_string(), "1.0.210".to_string());
            }

            // Deliberately malformed TOML: missing closing `]` on the table
            // header.
            let content2 = r#"[dependencies
serde = "1.0"
"#;

            let parse_result2 = ecosystem.parse_manifest(content2, &uri).await.ok();
            assert!(
                parse_result2.is_none(),
                "Parse should fail for invalid TOML"
            );

            let mut doc_state2 =
                DocumentState::new_without_parse_result("cargo", content2.to_string());

            if let Some(old_doc) = state.get_document(&uri) {
                preserve_cache(&mut doc_state2, &old_doc);
            }

            state.update_document(uri.clone(), doc_state2);

            let doc = state.get_document(&uri).unwrap();
            assert_eq!(
                doc.cached_versions.len(),
                1,
                "Cache should be preserved on parse failure"
            );
            assert_eq!(
                doc.cached_versions.get("serde"),
                Some(&"1.0.210".to_string())
            );
        }

        /// Adding a dependency shows up in `added` and triggers a fetch.
        #[test]
        fn test_dependency_diff_detects_additions() {
            let old: HashSet<String> = ["serde", "tokio"].iter().map(|s| s.to_string()).collect();
            let new: HashSet<String> = ["serde", "tokio", "anyhow"]
                .iter()
                .map(|s| s.to_string())
                .collect();

            let diff = DependencyDiff::compute(&old, &new);

            assert_eq!(diff.added.len(), 1);
            assert!(diff.added.contains(&"anyhow".to_string()));
            assert!(diff.removed.is_empty());
            assert!(diff.needs_fetch());
        }

        /// Removing a dependency shows up in `removed` only; removals alone
        /// never require a registry fetch.
        #[test]
        fn test_dependency_diff_detects_removals() {
            let old: HashSet<String> = ["serde", "tokio", "anyhow"]
                .iter()
                .map(|s| s.to_string())
                .collect();
            let new: HashSet<String> = ["serde", "tokio"].iter().map(|s| s.to_string()).collect();

            let diff = DependencyDiff::compute(&old, &new);

            assert!(diff.added.is_empty());
            assert_eq!(diff.removed.len(), 1);
            assert!(diff.removed.contains(&"anyhow".to_string()));
            assert!(!diff.needs_fetch());
        }

        /// Identical dependency sets produce an empty diff and no fetch.
        #[test]
        fn test_dependency_diff_no_changes() {
            let old: HashSet<String> = ["serde", "tokio"].iter().map(|s| s.to_string()).collect();
            let new: HashSet<String> = ["serde", "tokio"].iter().map(|s| s.to_string()).collect();

            let diff = DependencyDiff::compute(&old, &new);

            assert!(diff.added.is_empty());
            assert!(diff.removed.is_empty());
            assert!(!diff.needs_fetch());
        }

        /// Going from no dependencies to some: everything is `added`.
        #[test]
        fn test_dependency_diff_empty_to_new() {
            let old: HashSet<String> = HashSet::new();
            let new: HashSet<String> = ["serde", "tokio"].iter().map(|s| s.to_string()).collect();

            let diff = DependencyDiff::compute(&old, &new);

            assert_eq!(diff.added.len(), 2);
            assert!(diff.removed.is_empty());
            assert!(diff.needs_fetch());
        }

        /// When a dependency is removed, its cache entry is pruned (using
        /// the diff's `removed` list) while the surviving entries are kept.
        #[tokio::test]
        async fn test_cache_pruned_on_dependency_removal() {
            let state = Arc::new(ServerState::new());
            let uri = Uri::from_file_path("/test/Cargo.toml").unwrap();

            let content1 = r#"[dependencies]
serde = "1.0"
tokio = "1.0"
anyhow = "1.0"
"#;

            let ecosystem = state.ecosystem_registry.get("cargo").unwrap();
            let parse_result1 = ecosystem.parse_manifest(content1, &uri).await.unwrap();
            let doc_state1 =
                DocumentState::new_from_parse_result("cargo", content1.to_string(), parse_result1);
            state.update_document(uri.clone(), doc_state1);

            // Seed a cache entry for all three dependencies; scoped so the
            // mutable guard is dropped before further map access.
            {
                let mut doc = state.documents.get_mut(&uri).unwrap();
                doc.cached_versions
                    .insert("serde".to_string(), "1.0.210".to_string());
                doc.cached_versions
                    .insert("tokio".to_string(), "1.40.0".to_string());
                doc.cached_versions
                    .insert("anyhow".to_string(), "1.0.89".to_string());
            }

            // New manifest drops `anyhow`.
            let content2 = r#"[dependencies]
serde = "1.0"
tokio = "1.0"
"#;

            // Dependency names are spelled out by hand here (mirroring the
            // manifests above) rather than extracted from the parse results.
            let old_dep_names: HashSet<String> = ["serde", "tokio", "anyhow"]
                .iter()
                .map(|s| s.to_string())
                .collect();
            let new_dep_names: HashSet<String> =
                ["serde", "tokio"].iter().map(|s| s.to_string()).collect();
            let diff = DependencyDiff::compute(&old_dep_names, &new_dep_names);

            let parse_result2 = ecosystem.parse_manifest(content2, &uri).await.unwrap();
            let mut doc_state2 =
                DocumentState::new_from_parse_result("cargo", content2.to_string(), parse_result2);

            if let Some(old_doc) = state.get_document(&uri) {
                preserve_cache(&mut doc_state2, &old_doc);
            }

            // Prune cache entries for removed dependencies before storing.
            for removed_dep in &diff.removed {
                doc_state2.cached_versions.remove(removed_dep);
            }

            state.update_document(uri.clone(), doc_state2);

            let doc = state.get_document(&uri).unwrap();
            assert_eq!(
                doc.cached_versions.len(),
                2,
                "anyhow should be removed from cache"
            );
            assert!(doc.cached_versions.contains_key("serde"));
            assert!(doc.cached_versions.contains_key("tokio"));
            assert!(!doc.cached_versions.contains_key("anyhow"));
        }
    }
1725}