1use super::loader::load_document_from_disk;
7use super::state::{DocumentState, ServerState};
8use crate::config::DepsConfig;
9use crate::handlers::diagnostics;
10use crate::progress::{ProgressSender, RegistryProgress};
11use deps_core::Ecosystem;
12use deps_core::Registry;
13use deps_core::Result;
14use std::collections::{HashMap, HashSet};
15use std::sync::Arc;
16use tokio::sync::RwLock;
17use tokio::task::JoinHandle;
18use tower_lsp_server::Client;
19use tower_lsp_server::ls_types::{MessageType, Uri};
20
21fn preserve_cache(new_state: &mut DocumentState, old_state: &DocumentState) {
24 tracing::trace!(
25 cached = old_state.cached_versions.len(),
26 resolved = old_state.resolved_versions.len(),
27 "preserving version cache"
28 );
29 new_state
30 .cached_versions
31 .clone_from(&old_state.cached_versions);
32 new_state
33 .resolved_versions
34 .clone_from(&old_state.resolved_versions);
35}
36
/// Set difference between the dependency names of two manifest revisions.
///
/// `added` drives which packages need a fresh registry fetch; `removed`
/// drives cache eviction in the change handler.
#[derive(Debug, Clone, Default)]
struct DependencyDiff {
    /// Names present in the new manifest but not the old one.
    added: Vec<String>,
    /// Names dropped by the edit; their cached versions are evicted.
    /// (Previously carried a stale `#[allow(dead_code)]` — the field is
    /// read by `handle_document_change`.)
    removed: Vec<String>,
}

impl DependencyDiff {
    /// Compute the added/removed name sets between two dependency snapshots.
    fn compute(old_deps: &HashSet<String>, new_deps: &HashSet<String>) -> Self {
        Self {
            added: new_deps.difference(old_deps).cloned().collect(),
            removed: old_deps.difference(new_deps).cloned().collect(),
        }
    }

    /// True when at least one new dependency requires a registry fetch.
    #[cfg(test)]
    fn needs_fetch(&self) -> bool {
        !self.added.is_empty()
    }
}
58
/// Outcome of one batch of parallel registry lookups.
struct FetchResult {
    /// Successfully fetched latest versions, keyed by package name.
    versions: HashMap<String, String>,
    /// Number of packages that errored or timed out.
    failed_count: usize,
    /// First registry error message observed, kept for user-facing reporting.
    first_error: Option<String>,
}
68
/// Fetch the latest available version of each package concurrently.
///
/// Runs at most `max_concurrent` registry lookups at a time and bounds each
/// individual lookup by `timeout_secs`. Failures (registry errors and
/// timeouts) are counted rather than propagated, so one bad package cannot
/// sink the whole batch; the first error message is retained for reporting.
async fn fetch_latest_versions_parallel(
    registry: Arc<dyn Registry>,
    package_names: Vec<String>,
    progress_sender: Option<ProgressSender>,
    timeout_secs: u64,
    max_concurrent: usize,
) -> FetchResult {
    use futures::stream::{self, StreamExt};
    use std::time::Duration;

    // Shared counters across the concurrently polled lookup futures.
    let fetched = Arc::new(std::sync::atomic::AtomicUsize::new(0));
    let failed = Arc::new(std::sync::atomic::AtomicUsize::new(0));
    let first_error: Arc<std::sync::Mutex<Option<String>>> = Arc::new(std::sync::Mutex::new(None));
    let timeout = Duration::from_secs(timeout_secs);

    let results: Vec<_> = stream::iter(package_names)
        .map(|name| {
            let registry = Arc::clone(&registry);
            let fetched = Arc::clone(&fetched);
            let failed = Arc::clone(&failed);
            let first_error = Arc::clone(&first_error);
            let progress_sender = progress_sender.clone();
            async move {
                // "*" asks the registry for the newest version matching any requirement.
                let result =
                    tokio::time::timeout(timeout, registry.get_latest_matching(&name, "*")).await;

                let version = match result {
                    Ok(Ok(Some(v))) => {
                        tracing::debug!(package = %name, version = %v.version_string(), "fetched");
                        Some((name.clone(), v.version_string().to_string()))
                    }
                    Ok(Ok(None)) => {
                        // Registry answered but has no version; not counted as a failure.
                        tracing::debug!(package = %name, "no version found");
                        None
                    }
                    Ok(Err(e)) => {
                        tracing::warn!(package = %name, error = %e, "fetch failed");
                        failed.fetch_add(1, std::sync::atomic::Ordering::Relaxed);
                        // Recover from a poisoned lock — the guarded value is a plain
                        // Option<String>, so a panic elsewhere cannot corrupt it.
                        let mut fe = first_error.lock().unwrap_or_else(|p| p.into_inner());
                        if fe.is_none() {
                            *fe = Some(e.to_string());
                        }
                        None
                    }
                    Err(_) => {
                        // tokio timeout elapsed before the registry answered.
                        tracing::warn!(package = %name, "fetch timed out ({}s)", timeout.as_secs());
                        failed.fetch_add(1, std::sync::atomic::Ordering::Relaxed);
                        None
                    }
                };

                // Progress counts every completed lookup, successful or not.
                let count = fetched.fetch_add(1, std::sync::atomic::Ordering::Relaxed) + 1;
                if let Some(ref sender) = progress_sender {
                    sender.send(count);
                }

                version
            }
        })
        .buffer_unordered(max_concurrent)
        .collect()
        .await;

    FetchResult {
        versions: results.into_iter().flatten().collect(),
        failed_count: failed.load(std::sync::atomic::Ordering::Relaxed),
        first_error: first_error.lock().unwrap_or_else(|p| p.into_inner()).take(),
    }
}
166
/// Handle `textDocument/didOpen`: parse the manifest, store document state,
/// and spawn a background task that loads lock-file versions, fetches the
/// latest versions from the registry, and publishes diagnostics.
///
/// Returns the background task's `JoinHandle` so the caller can track or
/// cancel it, or `DepsError::UnsupportedEcosystem` when no handler matches
/// the URI.
pub async fn handle_document_open(
    uri: Uri,
    content: String,
    state: Arc<ServerState>,
    client: Client,
    config: Arc<RwLock<DepsConfig>>,
) -> Result<JoinHandle<()>> {
    let ecosystem = match state.ecosystem_registry.get_for_uri(&uri) {
        Some(e) => e,
        None => {
            tracing::debug!("No ecosystem handler for {:?}", uri);
            return Err(deps_core::error::DepsError::UnsupportedEcosystem(format!(
                "{uri:?}"
            )));
        }
    };

    tracing::info!(
        "Opening {:?} with ecosystem: {}",
        uri,
        ecosystem.display_name()
    );

    // Parse errors are swallowed (`.ok()`): the document is tracked either way.
    let parse_result = ecosystem.parse_manifest(&content, &uri).await.ok();

    let doc_state = if let Some(pr) = parse_result {
        DocumentState::new_from_parse_result(ecosystem.id(), content, pr)
    } else {
        tracing::debug!("Failed to parse manifest, storing document without parse result");
        DocumentState::new_without_parse_result(ecosystem.id(), content)
    };

    state.update_document(uri.clone(), doc_state);

    // Clone the config section inside a block so the read lock is released
    // before the task is spawned.
    let cache_config = { config.read().await.cache.clone() };

    let uri_clone = uri.clone();
    let state_clone = Arc::clone(&state);
    let ecosystem_clone = Arc::clone(&ecosystem);
    let client_clone = client.clone();

    let task = tokio::spawn(async move {
        tracing::debug!("background task started");

        // Lock-file versions are available offline; load them first so the
        // user gets hints while the registry fetch is still running.
        let resolved_versions =
            load_resolved_versions(&uri_clone, &state_clone, ecosystem_clone.as_ref()).await;

        if !resolved_versions.is_empty()
            && let Some(mut doc) = state_clone.documents.get_mut(&uri_clone)
        {
            doc.update_resolved_versions(resolved_versions.clone());
            doc.update_cached_versions(resolved_versions.clone());
        }

        // Collect dependency names inside a scope so the document borrow is
        // released before the long-running registry fetch.
        let dep_names: Vec<String> = {
            let doc = match state_clone.get_document(&uri_clone) {
                Some(d) => d,
                None => {
                    tracing::warn!("document not found, aborting fetch");
                    return;
                }
            };
            let parse_result = match doc.parse_result() {
                Some(p) => p,
                None => {
                    tracing::warn!("no parse result, aborting fetch");
                    return;
                }
            };
            parse_result
                .dependencies()
                .into_iter()
                .map(|d| d.name().to_string())
                .collect()
        };

        tracing::debug!(count = dep_names.len(), "starting registry fetch");

        if let Some(mut doc) = state_clone.documents.get_mut(&uri_clone) {
            doc.set_loading();
        }

        // Progress reporting is best-effort: give up after 2s rather than
        // delaying the fetch when the client does not respond.
        let (progress, progress_sender) = match tokio::time::timeout(
            std::time::Duration::from_secs(2),
            RegistryProgress::start(client_clone.clone(), uri_clone.as_str(), dep_names.len()),
        )
        .await
        {
            Ok(Ok((p, s))) => (Some(p), Some(s)),
            _ => (None, None),
        };

        tracing::debug!("progress started, fetching versions");

        let registry = ecosystem_clone.registry();
        let fetch_result = fetch_latest_versions_parallel(
            registry,
            dep_names,
            progress_sender,
            cache_config.fetch_timeout_secs,
            cache_config.max_concurrent_fetches,
        )
        .await;

        // "Success" here means at least one version came back.
        let success = !fetch_result.versions.is_empty();
        tracing::debug!(
            fetched = fetch_result.versions.len(),
            failed = fetch_result.failed_count,
            "registry fetch complete"
        );

        if let Some(mut doc) = state_clone.documents.get_mut(&uri_clone) {
            doc.update_cached_versions(fetch_result.versions);
            if success {
                doc.set_loaded();
            } else {
                doc.set_failed();
            }
        }

        if let Some(progress) = progress {
            progress.end(success).await;
        }

        // Surface fetch failures to the user once per batch.
        if fetch_result.failed_count > 0 {
            let message = if let Some(err) = &fetch_result.first_error {
                format!("deps-lsp: {err}")
            } else {
                format!(
                    "deps-lsp: {} package(s) failed to fetch (timeout or network error)",
                    fetch_result.failed_count
                )
            };
            client_clone
                .show_message(MessageType::WARNING, message)
                .await;
        }

        // Not every client supports this refresh request; failure is benign.
        if let Err(e) = client_clone.inlay_hint_refresh().await {
            tracing::debug!("inlay_hint_refresh not supported: {:?}", e);
        }

        let diags =
            diagnostics::generate_diagnostics_internal(Arc::clone(&state_clone), &uri_clone).await;

        client_clone
            .publish_diagnostics(uri_clone.clone(), diags, None)
            .await;
    });

    Ok(task)
}
340
/// Handle `textDocument/didChange`: re-parse the manifest, diff the
/// dependency set against the previous revision, and spawn a background task
/// that fetches versions only for newly added dependencies.
///
/// Cached versions from the previous state are preserved (and pruned for
/// removed dependencies) so unchanged packages need no re-fetch.
pub async fn handle_document_change(
    uri: Uri,
    content: String,
    state: Arc<ServerState>,
    client: Client,
    config: Arc<RwLock<DepsConfig>>,
) -> Result<JoinHandle<()>> {
    let ecosystem = match state.ecosystem_registry.get_for_uri(&uri) {
        Some(e) => e,
        None => {
            tracing::debug!("No ecosystem handler for {:?}", uri);
            return Err(deps_core::error::DepsError::UnsupportedEcosystem(format!(
                "{uri:?}"
            )));
        }
    };

    // Snapshot the previous dependency names before the document is replaced.
    let old_dep_names: HashSet<String> =
        state.get_document(&uri).map_or_else(HashSet::new, |doc| {
            doc.parse_result()
                .map(|pr| {
                    pr.dependencies()
                        .into_iter()
                        .map(|d| d.name().to_string())
                        .collect()
                })
                .unwrap_or_default()
        });

    let parse_result = ecosystem.parse_manifest(&content, &uri).await.ok();

    let new_dep_names: HashSet<String> = parse_result
        .as_ref()
        .map(|pr| {
            pr.dependencies()
                .into_iter()
                .map(|d| d.name().to_string())
                .collect()
        })
        .unwrap_or_default();

    let diff = DependencyDiff::compute(&old_dep_names, &new_dep_names);
    tracing::debug!(
        added = diff.added.len(),
        removed = diff.removed.len(),
        "dependency diff"
    );

    // Store the document even when parsing fails, mirroring handle_document_open.
    let mut doc_state = if let Some(pr) = parse_result {
        DocumentState::new_from_parse_result(ecosystem.id(), content, pr)
    } else {
        tracing::debug!("Failed to parse manifest, storing document without parse result");
        DocumentState::new_without_parse_result(ecosystem.id(), content)
    };

    if let Some(old_doc) = state.get_document(&uri) {
        preserve_cache(&mut doc_state, &old_doc);
    }

    // Evict cache entries for dependencies dropped by this edit.
    for removed_dep in &diff.removed {
        doc_state.cached_versions.remove(removed_dep);
        doc_state.resolved_versions.remove(removed_dep);
    }

    state.update_document(uri.clone(), doc_state);

    // Clone the config section inside a block so the read lock is released
    // before the task is spawned.
    let cache_config = { config.read().await.cache.clone() };

    let uri_clone = uri.clone();
    let state_clone = Arc::clone(&state);
    let ecosystem_clone = Arc::clone(&ecosystem);
    let client_clone = client.clone();
    let deps_to_fetch = diff.added;

    let task = tokio::spawn(async move {
        // Small debounce so rapid keystrokes coalesce before any work is done.
        tokio::time::sleep(tokio::time::Duration::from_millis(100)).await;

        let resolved_versions =
            load_resolved_versions(&uri_clone, &state_clone, ecosystem_clone.as_ref()).await;

        // NOTE(review): unlike handle_document_open, resolved versions are not
        // mirrored into cached_versions here — presumably because
        // preserve_cache already carried the cache over; confirm intent.
        if !resolved_versions.is_empty()
            && let Some(mut doc) = state_clone.documents.get_mut(&uri_clone)
        {
            doc.update_resolved_versions(resolved_versions.clone());
        }

        if deps_to_fetch.is_empty() {
            // Nothing new to fetch; refresh the UI from the preserved cache.
            tracing::debug!("no new dependencies, skipping registry fetch");

            if let Some(mut doc) = state_clone.documents.get_mut(&uri_clone) {
                doc.set_loaded();
            }

            if let Err(e) = client_clone.inlay_hint_refresh().await {
                tracing::debug!("inlay_hint_refresh not supported: {:?}", e);
            }

            let diags =
                diagnostics::generate_diagnostics_internal(Arc::clone(&state_clone), &uri_clone)
                    .await;
            client_clone
                .publish_diagnostics(uri_clone.clone(), diags, None)
                .await;
            return;
        }

        tracing::info!(
            count = deps_to_fetch.len(),
            "fetching versions for new dependencies"
        );

        if let Some(mut doc) = state_clone.documents.get_mut(&uri_clone) {
            doc.set_loading();
        }

        // Progress reporting is best-effort: give up after 2s rather than
        // delaying the fetch when the client does not respond.
        let (progress, progress_sender) = match tokio::time::timeout(
            std::time::Duration::from_secs(2),
            RegistryProgress::start(
                client_clone.clone(),
                uri_clone.as_str(),
                deps_to_fetch.len(),
            ),
        )
        .await
        {
            Ok(Ok((p, s))) => (Some(p), Some(s)),
            _ => (None, None),
        };

        let registry = ecosystem_clone.registry();
        let fetch_result = fetch_latest_versions_parallel(
            registry,
            deps_to_fetch,
            progress_sender,
            cache_config.fetch_timeout_secs,
            cache_config.max_concurrent_fetches,
        )
        .await;

        let success = !fetch_result.versions.is_empty();

        if let Some(mut doc) = state_clone.documents.get_mut(&uri_clone) {
            // Merge new results into the preserved cache instead of replacing it.
            for (name, version) in fetch_result.versions {
                doc.cached_versions.insert(name, version);
            }
            if success {
                doc.set_loaded();
            } else {
                doc.set_failed();
            }
        }

        if let Some(progress) = progress {
            progress.end(success).await;
        }

        // Surface fetch failures to the user once per batch.
        if fetch_result.failed_count > 0 {
            let message = if let Some(err) = &fetch_result.first_error {
                format!("deps-lsp: {err}")
            } else {
                format!(
                    "deps-lsp: {} package(s) failed to fetch (timeout or network error)",
                    fetch_result.failed_count
                )
            };
            client_clone
                .show_message(MessageType::WARNING, message)
                .await;
        }

        if let Err(e) = client_clone.inlay_hint_refresh().await {
            tracing::debug!("inlay_hint_refresh not supported: {:?}", e);
        }

        let diags =
            diagnostics::generate_diagnostics_internal(Arc::clone(&state_clone), &uri_clone).await;

        client_clone
            .publish_diagnostics(uri_clone.clone(), diags, None)
            .await;
    });

    Ok(task)
}
546
547async fn load_resolved_versions(
553 uri: &Uri,
554 state: &ServerState,
555 ecosystem: &dyn Ecosystem,
556) -> HashMap<String, String> {
557 let lock_provider = match ecosystem.lockfile_provider() {
558 Some(p) => p,
559 None => {
560 tracing::debug!("No lock file provider for ecosystem {}", ecosystem.id());
561 return HashMap::new();
562 }
563 };
564
565 let lockfile_path = match lock_provider.locate_lockfile(uri) {
566 Some(path) => path,
567 None => {
568 tracing::debug!("No lock file found for {:?}", uri);
569 return HashMap::new();
570 }
571 };
572
573 match state
574 .lockfile_cache
575 .get_or_parse(lock_provider.as_ref(), &lockfile_path)
576 .await
577 {
578 Ok(resolved) => {
579 tracing::info!(
580 "Loaded {} resolved versions from {}",
581 resolved.len(),
582 lockfile_path.display()
583 );
584 resolved
585 .iter()
586 .map(|(name, pkg)| (name.clone(), pkg.version.clone()))
587 .collect()
588 }
589 Err(e) => {
590 tracing::warn!("Failed to parse lock file: {}", e);
591 HashMap::new()
592 }
593 }
594}
595
596pub async fn ensure_document_loaded(
643 uri: &Uri,
644 state: Arc<ServerState>,
645 client: Client,
646 config: Arc<RwLock<DepsConfig>>,
647) -> bool {
648 if state.get_document(uri).is_some() {
650 tracing::debug!("Document already loaded: {:?}", uri);
651 return true;
652 }
653
654 let cold_start_config = { config.read().await.cold_start.clone() };
656
657 if !cold_start_config.enabled {
659 tracing::debug!("Cold start disabled via configuration");
660 return false;
661 }
662
663 if !state.cold_start_limiter.allow_cold_start(uri) {
665 tracing::warn!("Cold start rate limited: {:?}", uri);
666 return false;
667 }
668
669 if state.ecosystem_registry.get_for_uri(uri).is_none() {
671 tracing::debug!("Unsupported file type: {:?}", uri);
672 return false;
673 }
674
675 tracing::info!("Loading document from disk (cold start): {:?}", uri);
677 let content = match load_document_from_disk(uri).await {
678 Ok(c) => c,
679 Err(e) => {
680 tracing::warn!("Failed to load document {:?}: {}", uri, e);
681 client
682 .log_message(MessageType::WARNING, format!("Could not load file: {e}"))
683 .await;
684 return false;
685 }
686 };
687
688 match handle_document_open(
690 uri.clone(),
691 content,
692 Arc::clone(&state),
693 client.clone(),
694 Arc::clone(&config),
695 )
696 .await
697 {
698 Ok(task) => {
699 state.spawn_background_task(uri.clone(), task).await;
700 tracing::info!("Document loaded successfully from disk: {:?}", uri);
701 true
702 }
703 Err(e) => {
704 tracing::warn!("Failed to process loaded document {:?}: {}", uri, e);
705 false
706 }
707 }
708}
709
710#[cfg(test)]
711mod tests {
712 use super::*;
713
714 #[test]
717 fn test_ecosystem_registry_unknown_file() {
718 let state = ServerState::new();
719 let unknown_uri =
720 tower_lsp_server::ls_types::Uri::from_file_path("/test/unknown.txt").unwrap();
721 assert!(state.ecosystem_registry.get_for_uri(&unknown_uri).is_none());
722 }
723
724 #[tokio::test]
725 async fn test_ensure_document_loaded_unsupported_file_check() {
726 let state = Arc::new(ServerState::new());
728 let uri = Uri::from_file_path("/test/README.md").unwrap();
729
730 assert!(
732 state.ecosystem_registry.get_for_uri(&uri).is_none(),
733 "README.md should not have an ecosystem handler"
734 );
735
736 }
739
740 #[tokio::test]
741 async fn test_ensure_document_loaded_file_not_found_check() {
742 use super::load_document_from_disk;
744
745 let uri = Uri::from_file_path("/nonexistent/Cargo.toml").unwrap();
746 let result = load_document_from_disk(&uri).await;
747
748 assert!(result.is_err(), "Should fail for missing files");
749
750 }
752
    /// A registry whose lookups hang for 10s must be cut off by the 1s
    /// timeout and reported via `failed_count` instead of blocking the batch.
    #[tokio::test]
    async fn test_fetch_latest_versions_parallel_with_timeout() {
        use deps_core::{Metadata, Registry, Version};
        use std::any::Any;
        use std::time::Duration;

        /// Mock registry: every lookup sleeps far longer than the test timeout.
        struct TimeoutRegistry;

        impl Registry for TimeoutRegistry {
            fn get_versions<'a>(
                &'a self,
                _name: &'a str,
            ) -> deps_core::ecosystem::BoxFuture<'a, deps_core::Result<Vec<Box<dyn Version>>>>
            {
                Box::pin(async move {
                    tokio::time::sleep(Duration::from_secs(10)).await;
                    Ok(vec![])
                })
            }

            fn get_latest_matching<'a>(
                &'a self,
                _name: &'a str,
                _req: &'a str,
            ) -> deps_core::ecosystem::BoxFuture<'a, deps_core::Result<Option<Box<dyn Version>>>>
            {
                Box::pin(async move {
                    tokio::time::sleep(Duration::from_secs(10)).await;
                    Ok(None)
                })
            }

            fn search<'a>(
                &'a self,
                _query: &'a str,
                _limit: usize,
            ) -> deps_core::ecosystem::BoxFuture<'a, deps_core::Result<Vec<Box<dyn Metadata>>>>
            {
                Box::pin(async move { Ok(vec![]) })
            }

            fn package_url(&self, name: &str) -> String {
                format!("https://example.com/{}", name)
            }

            fn as_any(&self) -> &dyn Any {
                self
            }
        }

        let registry: Arc<dyn Registry> = Arc::new(TimeoutRegistry);
        let packages = vec!["slow-package".to_string()];

        // 1s per-package timeout, 10 concurrent slots.
        let result = fetch_latest_versions_parallel(registry, packages, None, 1, 10).await;

        assert!(result.versions.is_empty(), "Slow package should timeout");
        assert_eq!(result.failed_count, 1, "Should track 1 failed package");
    }
816
    /// With `buffer_unordered`, one slow package must not delay the batch:
    /// the whole fetch should finish well before the mock's 10s sleep.
    #[tokio::test]
    async fn test_fetch_latest_versions_parallel_fast_packages_not_blocked() {
        use deps_core::{Metadata, Registry, Version};
        use std::any::Any;
        use std::time::Duration;

        /// Mock registry: only "slow-package" sleeps; everything else
        /// answers immediately (with no version).
        struct MixedRegistry;

        impl Registry for MixedRegistry {
            fn get_versions<'a>(
                &'a self,
                name: &'a str,
            ) -> deps_core::ecosystem::BoxFuture<'a, deps_core::Result<Vec<Box<dyn Version>>>>
            {
                Box::pin(async move {
                    if name == "slow-package" {
                        tokio::time::sleep(Duration::from_secs(10)).await;
                    }
                    Ok(vec![])
                })
            }

            fn get_latest_matching<'a>(
                &'a self,
                name: &'a str,
                _req: &'a str,
            ) -> deps_core::ecosystem::BoxFuture<'a, deps_core::Result<Option<Box<dyn Version>>>>
            {
                Box::pin(async move {
                    if name == "slow-package" {
                        tokio::time::sleep(Duration::from_secs(10)).await;
                    }
                    Ok(None)
                })
            }

            fn search<'a>(
                &'a self,
                _query: &'a str,
                _limit: usize,
            ) -> deps_core::ecosystem::BoxFuture<'a, deps_core::Result<Vec<Box<dyn Metadata>>>>
            {
                Box::pin(async move { Ok(vec![]) })
            }

            fn package_url(&self, name: &str) -> String {
                format!("https://example.com/{}", name)
            }

            fn as_any(&self) -> &dyn Any {
                self
            }
        }

        let registry: Arc<dyn Registry> = Arc::new(MixedRegistry);
        let packages = vec!["slow-package".to_string(), "fast-package".to_string()];

        let start = std::time::Instant::now();
        let result = fetch_latest_versions_parallel(registry, packages, None, 1, 10).await;
        let elapsed = start.elapsed();

        // 1s timeout per package; the 3s budget leaves slack for CI scheduling.
        assert!(
            elapsed < Duration::from_secs(3),
            "Should not wait for slow package: {:?}",
            elapsed
        );

        assert!(
            result.versions.is_empty(),
            "No versions returned (test registry returns empty)"
        );
        assert_eq!(
            result.failed_count, 1,
            "Slow package should be marked as failed"
        );
    }
900
    /// `buffer_unordered(max_concurrent)` must bound in-flight lookups.
    #[tokio::test]
    async fn test_fetch_latest_versions_parallel_concurrency_limit() {
        use deps_core::{Metadata, Registry, Version};
        use std::any::Any;
        use std::sync::atomic::{AtomicUsize, Ordering};
        use std::time::Duration;

        /// Mock registry that records the peak number of concurrent calls.
        struct ConcurrencyTrackingRegistry {
            // Lookups currently in flight.
            current: Arc<AtomicUsize>,
            // Highest in-flight count ever observed.
            max_seen: Arc<AtomicUsize>,
        }

        impl Registry for ConcurrencyTrackingRegistry {
            fn get_versions<'a>(
                &'a self,
                _name: &'a str,
            ) -> deps_core::ecosystem::BoxFuture<'a, deps_core::Result<Vec<Box<dyn Version>>>>
            {
                Box::pin(async move {
                    let current = self.current.fetch_add(1, Ordering::SeqCst) + 1;

                    self.max_seen.fetch_max(current, Ordering::SeqCst);

                    // Hold the slot long enough that overlap is observable.
                    tokio::time::sleep(Duration::from_millis(50)).await;

                    self.current.fetch_sub(1, Ordering::SeqCst);

                    Ok(vec![])
                })
            }

            fn get_latest_matching<'a>(
                &'a self,
                _name: &'a str,
                _req: &'a str,
            ) -> deps_core::ecosystem::BoxFuture<'a, deps_core::Result<Option<Box<dyn Version>>>>
            {
                Box::pin(async move {
                    let current = self.current.fetch_add(1, Ordering::SeqCst) + 1;

                    self.max_seen.fetch_max(current, Ordering::SeqCst);

                    tokio::time::sleep(Duration::from_millis(50)).await;

                    self.current.fetch_sub(1, Ordering::SeqCst);

                    Ok(None)
                })
            }

            fn search<'a>(
                &'a self,
                _query: &'a str,
                _limit: usize,
            ) -> deps_core::ecosystem::BoxFuture<'a, deps_core::Result<Vec<Box<dyn Metadata>>>>
            {
                Box::pin(async move { Ok(vec![]) })
            }

            fn package_url(&self, name: &str) -> String {
                format!("https://example.com/{}", name)
            }

            fn as_any(&self) -> &dyn Any {
                self
            }
        }

        let current = Arc::new(AtomicUsize::new(0));
        let max_seen = Arc::new(AtomicUsize::new(0));

        let registry: Arc<dyn Registry> = Arc::new(ConcurrencyTrackingRegistry {
            current: Arc::clone(&current),
            max_seen: Arc::clone(&max_seen),
        });

        // 50 packages, generous 5s timeout, concurrency capped at 20.
        let packages: Vec<String> = (0..50).map(|i| format!("package-{}", i)).collect();

        fetch_latest_versions_parallel(registry, packages, None, 5, 20).await;

        // NOTE(review): asserts <= 22 rather than <= 20 — presumably slack
        // for the window between increment and max observation; confirm intent.
        let max = max_seen.load(Ordering::SeqCst);
        assert!(
            max <= 22,
            "Concurrency limit violated: {} concurrent requests (limit: 20)",
            max
        );
    }
999
    /// A batch mixing success, timeout, and error must return the successful
    /// result while omitting (and counting) the failures.
    #[tokio::test]
    async fn test_fetch_partial_success_with_mixed_outcomes() {
        use deps_core::{Metadata, Registry, Version};
        use std::any::Any;
        use std::time::Duration;

        /// Minimal `Version` impl wrapping a plain version string.
        #[derive(Debug)]
        struct MockVersion {
            version: String,
        }

        impl Version for MockVersion {
            fn version_string(&self) -> &str {
                &self.version
            }

            fn is_prerelease(&self) -> bool {
                false
            }

            fn is_yanked(&self) -> bool {
                false
            }

            fn as_any(&self) -> &dyn Any {
                self
            }
        }

        /// Registry keyed by package name: fast success, 10s hang, or error.
        struct MixedOutcomeRegistry;

        impl Registry for MixedOutcomeRegistry {
            fn get_versions<'a>(
                &'a self,
                name: &'a str,
            ) -> deps_core::ecosystem::BoxFuture<'a, deps_core::Result<Vec<Box<dyn Version>>>>
            {
                Box::pin(async move {
                    match name {
                        "package-fast" => {
                            Ok(vec![Box::new(MockVersion {
                                version: "1.0.0".to_string(),
                            }) as Box<dyn Version>])
                        }
                        "package-slow" => {
                            tokio::time::sleep(Duration::from_secs(10)).await;
                            Ok(vec![])
                        }
                        "package-error" => {
                            Err(deps_core::error::DepsError::CacheError(
                                "Mock registry error".to_string(),
                            ))
                        }
                        _ => Ok(vec![]),
                    }
                })
            }

            fn get_latest_matching<'a>(
                &'a self,
                name: &'a str,
                _req: &'a str,
            ) -> deps_core::ecosystem::BoxFuture<'a, deps_core::Result<Option<Box<dyn Version>>>>
            {
                Box::pin(async move {
                    match name {
                        "package-fast" => Ok(Some(Box::new(MockVersion {
                            version: "1.0.0".to_string(),
                        }) as Box<dyn Version>)),
                        "package-slow" => {
                            tokio::time::sleep(Duration::from_secs(10)).await;
                            Ok(None)
                        }
                        "package-error" => Err(deps_core::error::DepsError::CacheError(
                            "Mock registry error".to_string(),
                        )),
                        _ => Ok(None),
                    }
                })
            }

            fn search<'a>(
                &'a self,
                _query: &'a str,
                _limit: usize,
            ) -> deps_core::ecosystem::BoxFuture<'a, deps_core::Result<Vec<Box<dyn Metadata>>>>
            {
                Box::pin(async move { Ok(vec![]) })
            }

            fn package_url(&self, name: &str) -> String {
                format!("https://example.com/{}", name)
            }

            fn as_any(&self) -> &dyn Any {
                self
            }
        }

        let registry: Arc<dyn Registry> = Arc::new(MixedOutcomeRegistry);
        let packages = vec![
            "package-fast".to_string(),
            "package-slow".to_string(),
            "package-error".to_string(),
        ];

        // 1s timeout: "package-slow" times out; "package-error" errors.
        let result = fetch_latest_versions_parallel(registry, packages, None, 1, 10).await;

        assert_eq!(
            result.versions.len(),
            1,
            "Should have exactly 1 successful package"
        );
        assert_eq!(
            result.versions.get("package-fast"),
            Some(&"1.0.0".to_string()),
            "Fast package should have correct version"
        );
        assert!(
            !result.versions.contains_key("package-slow"),
            "Slow package should not be in results (timeout)"
        );
        assert!(
            !result.versions.contains_key("package-error"),
            "Error package should not be in results"
        );
    }
1137
    /// When every lookup errors, the result set is empty and every package
    /// is counted as failed.
    #[tokio::test]
    async fn test_fetch_registry_error_handled() {
        use deps_core::{Metadata, Registry, Version};
        use std::any::Any;

        /// Mock registry that fails every lookup with a `CacheError`.
        struct ErrorRegistry;

        impl Registry for ErrorRegistry {
            fn get_versions<'a>(
                &'a self,
                name: &'a str,
            ) -> deps_core::ecosystem::BoxFuture<'a, deps_core::Result<Vec<Box<dyn Version>>>>
            {
                Box::pin(async move {
                    Err(deps_core::error::DepsError::CacheError(format!(
                        "Failed to fetch package: {}",
                        name
                    )))
                })
            }

            fn get_latest_matching<'a>(
                &'a self,
                name: &'a str,
                _req: &'a str,
            ) -> deps_core::ecosystem::BoxFuture<'a, deps_core::Result<Option<Box<dyn Version>>>>
            {
                Box::pin(async move {
                    Err(deps_core::error::DepsError::CacheError(format!(
                        "Failed to fetch package: {}",
                        name
                    )))
                })
            }

            fn search<'a>(
                &'a self,
                _query: &'a str,
                _limit: usize,
            ) -> deps_core::ecosystem::BoxFuture<'a, deps_core::Result<Vec<Box<dyn Metadata>>>>
            {
                Box::pin(async move { Ok(vec![]) })
            }

            fn package_url(&self, name: &str) -> String {
                format!("https://example.com/{}", name)
            }

            fn as_any(&self) -> &dyn Any {
                self
            }
        }

        let registry: Arc<dyn Registry> = Arc::new(ErrorRegistry);
        let packages = vec![
            "package-1".to_string(),
            "package-2".to_string(),
            "package-3".to_string(),
        ];

        let result = fetch_latest_versions_parallel(registry, packages, None, 5, 10).await;

        assert!(
            result.versions.is_empty(),
            "All packages with errors should be omitted from results"
        );
        assert_eq!(
            result.failed_count, 3,
            "All 3 packages should be marked as failed"
        );
    }
1212
    /// Tests exercising the cargo ecosystem handler (feature-gated).
    #[cfg(feature = "cargo")]
    mod cargo_tests {
        use super::*;

        /// Cargo.toml must resolve to the cargo ecosystem handler.
        #[test]
        fn test_ecosystem_registry_lookup() {
            let state = ServerState::new();
            let cargo_uri =
                tower_lsp_server::ls_types::Uri::from_file_path("/test/Cargo.toml").unwrap();
            assert!(state.ecosystem_registry.get_for_uri(&cargo_uri).is_some());
        }

        /// A valid manifest parses and is stored with the cargo ecosystem id.
        #[tokio::test]
        async fn test_document_parsing() {
            let state = Arc::new(ServerState::new());
            let uri = tower_lsp_server::ls_types::Uri::from_file_path("/test/Cargo.toml").unwrap();
            let content = r#"[dependencies]
serde = "1.0"
"#;

            let ecosystem = state
                .ecosystem_registry
                .get_for_uri(&uri)
                .expect("Cargo ecosystem not found");

            let parse_result = ecosystem.parse_manifest(content, &uri).await;
            assert!(parse_result.is_ok());

            let doc_state = DocumentState::new_from_parse_result(
                "cargo",
                content.to_string(),
                parse_result.unwrap(),
            );
            state.update_document(uri.clone(), doc_state);

            assert_eq!(state.document_count(), 1);
            let doc = state.get_document(&uri).unwrap();
            assert_eq!(doc.ecosystem_id, "cargo");
        }

        /// Invalid TOML must still produce a stored document (without a
        /// parse result) so the server keeps tracking the file.
        #[tokio::test]
        async fn test_document_stored_even_when_parsing_fails() {
            let state = Arc::new(ServerState::new());
            let uri = tower_lsp_server::ls_types::Uri::from_file_path("/test/Cargo.toml").unwrap();
            // Missing closing bracket makes this invalid TOML.
            let content = r#"[dependencies
serde = "1.0"
"#;

            let ecosystem = state
                .ecosystem_registry
                .get_for_uri(&uri)
                .expect("Cargo ecosystem not found");

            let parse_result = ecosystem.parse_manifest(content, &uri).await.ok();
            assert!(
                parse_result.is_none(),
                "Parsing should fail for invalid TOML"
            );

            let doc_state = if let Some(pr) = parse_result {
                DocumentState::new_from_parse_result("cargo", content.to_string(), pr)
            } else {
                DocumentState::new_without_parse_result("cargo", content.to_string())
            };

            state.update_document(uri.clone(), doc_state);

            let doc = state.get_document(&uri);
            assert!(
                doc.is_some(),
                "Document should be stored even when parsing fails"
            );

            let doc = doc.unwrap();
            assert_eq!(doc.ecosystem_id, "cargo");
            assert_eq!(doc.content, content);
            assert!(
                doc.parse_result().is_none(),
                "Parse result should be None for failed parse"
            );
        }

        /// Fast path of `ensure_document_loaded`: a document already in
        /// state is reported as loaded without touching the disk.
        #[tokio::test]
        async fn test_ensure_document_loaded_fast_path() {
            let state = Arc::new(ServerState::new());
            let uri = Uri::from_file_path("/test/Cargo.toml").unwrap();
            let content = r#"[dependencies]
serde = "1.0""#;

            let ecosystem = state
                .ecosystem_registry
                .get_for_uri(&uri)
                .expect("Cargo ecosystem");
            let parse_result = ecosystem.parse_manifest(content, &uri).await.unwrap();
            let doc_state =
                DocumentState::new_from_parse_result("cargo", content.to_string(), parse_result);
            state.update_document(uri.clone(), doc_state);

            assert!(
                state.get_document(&uri).is_some(),
                "Document should exist in state"
            );
            assert_eq!(state.document_count(), 1, "Document count should be 1");
        }

        /// Cold-start load: a real file on disk round-trips through
        /// `load_document_from_disk` and parses successfully.
        #[tokio::test]
        async fn test_ensure_document_loaded_successful_disk_load() {
            use super::super::load_document_from_disk;
            use std::fs;
            use tempfile::TempDir;

            let temp_dir = TempDir::new().unwrap();
            let cargo_toml_path = temp_dir.path().join("Cargo.toml");
            let content = r#"[package]
name = "test"
version = "0.1.0"

[dependencies]
serde = "1.0"
"#;
            fs::write(&cargo_toml_path, content).unwrap();

            let uri = Uri::from_file_path(&cargo_toml_path).unwrap();

            let loaded_content = load_document_from_disk(&uri).await.unwrap();
            assert_eq!(loaded_content, content);

            let state = Arc::new(ServerState::new());
            let ecosystem = state
                .ecosystem_registry
                .get_for_uri(&uri)
                .expect("Cargo ecosystem");
            let parse_result = ecosystem.parse_manifest(&loaded_content, &uri).await;
            assert!(parse_result.is_ok(), "Should parse successfully");
        }

        /// Re-storing the same URI replaces the document rather than
        /// duplicating it.
        #[tokio::test]
        async fn test_ensure_document_loaded_idempotent_check() {
            let state = Arc::new(ServerState::new());
            let uri = Uri::from_file_path("/test/Cargo.toml").unwrap();
            let content = r#"[dependencies]
serde = "1.0""#;

            let ecosystem = state
                .ecosystem_registry
                .get_for_uri(&uri)
                .expect("Cargo ecosystem");

            let parse_result1 = ecosystem.parse_manifest(content, &uri).await.unwrap();
            let parse_result2 = ecosystem.parse_manifest(content, &uri).await.unwrap();

            let doc_state1 =
                DocumentState::new_from_parse_result("cargo", content.to_string(), parse_result1);
            state.update_document(uri.clone(), doc_state1);
            assert_eq!(state.document_count(), 1);

            let doc_state2 =
                DocumentState::new_from_parse_result("cargo", content.to_string(), parse_result2);
            state.update_document(uri.clone(), doc_state2);
            assert_eq!(
                state.document_count(),
                1,
                "Should still have only 1 document"
            );
        }
    }
1401
1402 #[cfg(feature = "npm")]
1404 mod npm_tests {
1405 use super::*;
1406
1407 #[test]
1408 fn test_ecosystem_registry_lookup() {
1409 let state = ServerState::new();
1410 let npm_uri =
1411 tower_lsp_server::ls_types::Uri::from_file_path("/test/package.json").unwrap();
1412 assert!(state.ecosystem_registry.get_for_uri(&npm_uri).is_some());
1413 }
1414
1415 #[tokio::test]
1416 async fn test_document_parsing() {
1417 let state = Arc::new(ServerState::new());
1418 let uri =
1419 tower_lsp_server::ls_types::Uri::from_file_path("/test/package.json").unwrap();
1420 let content = r#"{"dependencies": {"express": "^4.18.0"}}"#;
1421
1422 let ecosystem = state
1423 .ecosystem_registry
1424 .get_for_uri(&uri)
1425 .expect("npm ecosystem not found");
1426
1427 let parse_result = ecosystem.parse_manifest(content, &uri).await;
1428 assert!(parse_result.is_ok());
1429
1430 let doc_state = DocumentState::new_from_parse_result(
1431 "npm",
1432 content.to_string(),
1433 parse_result.unwrap(),
1434 );
1435 state.update_document(uri.clone(), doc_state);
1436
1437 let doc = state.get_document(&uri).unwrap();
1438 assert_eq!(doc.ecosystem_id, "npm");
1439 }
1440 }
1441
1442 #[cfg(feature = "pypi")]
1444 mod pypi_tests {
1445 use super::*;
1446
1447 #[test]
1448 fn test_ecosystem_registry_lookup() {
1449 let state = ServerState::new();
1450 let pypi_uri =
1451 tower_lsp_server::ls_types::Uri::from_file_path("/test/pyproject.toml").unwrap();
1452 assert!(state.ecosystem_registry.get_for_uri(&pypi_uri).is_some());
1453 }
1454
1455 #[tokio::test]
1456 async fn test_document_parsing() {
1457 let state = Arc::new(ServerState::new());
1458 let uri =
1459 tower_lsp_server::ls_types::Uri::from_file_path("/test/pyproject.toml").unwrap();
1460 let content = r#"[project]
1461dependencies = ["requests>=2.0.0"]
1462"#;
1463
1464 let ecosystem = state
1465 .ecosystem_registry
1466 .get_for_uri(&uri)
1467 .expect("pypi ecosystem not found");
1468
1469 let parse_result = ecosystem.parse_manifest(content, &uri).await;
1470 assert!(parse_result.is_ok());
1471
1472 let doc_state = DocumentState::new_from_parse_result(
1473 "pypi",
1474 content.to_string(),
1475 parse_result.unwrap(),
1476 );
1477 state.update_document(uri.clone(), doc_state);
1478
1479 let doc = state.get_document(&uri).unwrap();
1480 assert_eq!(doc.ecosystem_id, "pypi");
1481 }
1482 }
1483
1484 #[cfg(feature = "go")]
1486 mod go_tests {
1487 use super::*;
1488
1489 #[test]
1490 fn test_ecosystem_registry_lookup() {
1491 let state = ServerState::new();
1492 let go_uri = tower_lsp_server::ls_types::Uri::from_file_path("/test/go.mod").unwrap();
1493 assert!(state.ecosystem_registry.get_for_uri(&go_uri).is_some());
1494 }
1495
1496 #[tokio::test]
1497 async fn test_document_parsing() {
1498 let state = Arc::new(ServerState::new());
1499 let uri = tower_lsp_server::ls_types::Uri::from_file_path("/test/go.mod").unwrap();
1500 let content = r"module example.com/mymodule
1501
1502go 1.21
1503
1504require github.com/gorilla/mux v1.8.0
1505";
1506
1507 let ecosystem = state
1508 .ecosystem_registry
1509 .get_for_uri(&uri)
1510 .expect("go ecosystem not found");
1511
1512 let parse_result = ecosystem.parse_manifest(content, &uri).await;
1513 assert!(parse_result.is_ok());
1514
1515 let doc_state = DocumentState::new_from_parse_result(
1516 "go",
1517 content.to_string(),
1518 parse_result.unwrap(),
1519 );
1520 state.update_document(uri.clone(), doc_state);
1521
1522 let doc = state.get_document(&uri).unwrap();
1523 assert_eq!(doc.ecosystem_id, "go");
1524 }
1525 }
1526
1527 #[cfg(feature = "cargo")]
1529 mod incremental_fetch_tests {
1530 use super::*;
1531
1532 #[tokio::test]
1533 async fn test_preserve_cached_versions_on_change() {
1534 let state = Arc::new(ServerState::new());
1535 let uri = Uri::from_file_path("/test/Cargo.toml").unwrap();
1536
1537 let content1 = r#"[dependencies]
1539serde = "1.0"
1540tokio = "1.0"
1541"#;
1542
1543 let ecosystem = state.ecosystem_registry.get("cargo").unwrap();
1544 let parse_result1 = ecosystem.parse_manifest(content1, &uri).await.unwrap();
1545 let doc_state1 =
1546 DocumentState::new_from_parse_result("cargo", content1.to_string(), parse_result1);
1547 state.update_document(uri.clone(), doc_state1);
1548
1549 {
1551 let mut doc = state.documents.get_mut(&uri).unwrap();
1552 doc.cached_versions
1553 .insert("serde".to_string(), "1.0.210".to_string());
1554 doc.cached_versions
1555 .insert("tokio".to_string(), "1.40.0".to_string());
1556 doc.resolved_versions
1557 .insert("serde".to_string(), "1.0.195".to_string());
1558 doc.resolved_versions
1559 .insert("tokio".to_string(), "1.35.0".to_string());
1560 }
1561
1562 {
1564 let doc = state.get_document(&uri).unwrap();
1565 assert_eq!(doc.cached_versions.len(), 2);
1566 assert_eq!(doc.resolved_versions.len(), 2);
1567 }
1568
1569 let content2 = r#"[dependencies]
1571serde = "1.0.210"
1572tokio = "1.0"
1573"#;
1574
1575 let parse_result2 = ecosystem.parse_manifest(content2, &uri).await.unwrap();
1576 let mut doc_state2 =
1577 DocumentState::new_from_parse_result("cargo", content2.to_string(), parse_result2);
1578
1579 if let Some(old_doc) = state.get_document(&uri) {
1580 preserve_cache(&mut doc_state2, &old_doc);
1581 }
1582
1583 state.update_document(uri.clone(), doc_state2);
1584
1585 {
1587 let doc = state.get_document(&uri).unwrap();
1588 assert_eq!(
1589 doc.cached_versions.len(),
1590 2,
1591 "Cached versions should be preserved"
1592 );
1593 assert_eq!(
1594 doc.cached_versions.get("serde"),
1595 Some(&"1.0.210".to_string()),
1596 "serde cache preserved"
1597 );
1598 assert_eq!(
1599 doc.cached_versions.get("tokio"),
1600 Some(&"1.40.0".to_string()),
1601 "tokio cache preserved"
1602 );
1603 assert_eq!(
1604 doc.resolved_versions.len(),
1605 2,
1606 "Resolved versions should be preserved"
1607 );
1608 }
1609 }
1610
1611 #[tokio::test]
1612 async fn test_first_open_has_empty_cache() {
1613 let state = Arc::new(ServerState::new());
1614 let uri = Uri::from_file_path("/test/Cargo.toml").unwrap();
1615
1616 let content = r#"[dependencies]
1617serde = "1.0"
1618"#;
1619
1620 let ecosystem = state.ecosystem_registry.get("cargo").unwrap();
1621 let parse_result = ecosystem.parse_manifest(content, &uri).await.unwrap();
1622 let doc_state =
1623 DocumentState::new_from_parse_result("cargo", content.to_string(), parse_result);
1624 state.update_document(uri.clone(), doc_state);
1625
1626 let doc = state.get_document(&uri).unwrap();
1628 assert_eq!(
1629 doc.cached_versions.len(),
1630 0,
1631 "First open should have empty cache"
1632 );
1633 }
1634
1635 #[tokio::test]
1636 async fn test_preserve_cache_on_parse_failure() {
1637 let state = Arc::new(ServerState::new());
1638 let uri = Uri::from_file_path("/test/Cargo.toml").unwrap();
1639
1640 let content1 = r#"[dependencies]
1642serde = "1.0"
1643"#;
1644
1645 let ecosystem = state.ecosystem_registry.get("cargo").unwrap();
1646 let parse_result1 = ecosystem.parse_manifest(content1, &uri).await.unwrap();
1647 let doc_state1 =
1648 DocumentState::new_from_parse_result("cargo", content1.to_string(), parse_result1);
1649 state.update_document(uri.clone(), doc_state1);
1650
1651 {
1653 let mut doc = state.documents.get_mut(&uri).unwrap();
1654 doc.cached_versions
1655 .insert("serde".to_string(), "1.0.210".to_string());
1656 }
1657
1658 let content2 = r#"[dependencies
1660serde = "1.0"
1661"#;
1662
1663 let parse_result2 = ecosystem.parse_manifest(content2, &uri).await.ok();
1664 assert!(
1665 parse_result2.is_none(),
1666 "Parse should fail for invalid TOML"
1667 );
1668
1669 let mut doc_state2 =
1670 DocumentState::new_without_parse_result("cargo", content2.to_string());
1671
1672 if let Some(old_doc) = state.get_document(&uri) {
1673 preserve_cache(&mut doc_state2, &old_doc);
1674 }
1675
1676 state.update_document(uri.clone(), doc_state2);
1677
1678 let doc = state.get_document(&uri).unwrap();
1680 assert_eq!(
1681 doc.cached_versions.len(),
1682 1,
1683 "Cache should be preserved on parse failure"
1684 );
1685 assert_eq!(
1686 doc.cached_versions.get("serde"),
1687 Some(&"1.0.210".to_string())
1688 );
1689 }
1690
1691 #[test]
1692 fn test_dependency_diff_detects_additions() {
1693 let old: HashSet<String> = ["serde", "tokio"].iter().map(|s| s.to_string()).collect();
1694 let new: HashSet<String> = ["serde", "tokio", "anyhow"]
1695 .iter()
1696 .map(|s| s.to_string())
1697 .collect();
1698
1699 let diff = DependencyDiff::compute(&old, &new);
1700
1701 assert_eq!(diff.added.len(), 1);
1702 assert!(diff.added.contains(&"anyhow".to_string()));
1703 assert!(diff.removed.is_empty());
1704 assert!(diff.needs_fetch());
1705 }
1706
1707 #[test]
1708 fn test_dependency_diff_detects_removals() {
1709 let old: HashSet<String> = ["serde", "tokio", "anyhow"]
1710 .iter()
1711 .map(|s| s.to_string())
1712 .collect();
1713 let new: HashSet<String> = ["serde", "tokio"].iter().map(|s| s.to_string()).collect();
1714
1715 let diff = DependencyDiff::compute(&old, &new);
1716
1717 assert!(diff.added.is_empty());
1718 assert_eq!(diff.removed.len(), 1);
1719 assert!(diff.removed.contains(&"anyhow".to_string()));
1720 assert!(!diff.needs_fetch());
1721 }
1722
1723 #[test]
1724 fn test_dependency_diff_no_changes() {
1725 let old: HashSet<String> = ["serde", "tokio"].iter().map(|s| s.to_string()).collect();
1726 let new: HashSet<String> = ["serde", "tokio"].iter().map(|s| s.to_string()).collect();
1727
1728 let diff = DependencyDiff::compute(&old, &new);
1729
1730 assert!(diff.added.is_empty());
1731 assert!(diff.removed.is_empty());
1732 assert!(!diff.needs_fetch());
1733 }
1734
1735 #[test]
1736 fn test_dependency_diff_empty_to_new() {
1737 let old: HashSet<String> = HashSet::new();
1738 let new: HashSet<String> = ["serde", "tokio"].iter().map(|s| s.to_string()).collect();
1739
1740 let diff = DependencyDiff::compute(&old, &new);
1741
1742 assert_eq!(diff.added.len(), 2);
1743 assert!(diff.removed.is_empty());
1744 assert!(diff.needs_fetch());
1745 }
1746
1747 #[tokio::test]
1748 async fn test_cache_pruned_on_dependency_removal() {
1749 let state = Arc::new(ServerState::new());
1750 let uri = Uri::from_file_path("/test/Cargo.toml").unwrap();
1751
1752 let content1 = r#"[dependencies]
1754serde = "1.0"
1755tokio = "1.0"
1756anyhow = "1.0"
1757"#;
1758
1759 let ecosystem = state.ecosystem_registry.get("cargo").unwrap();
1760 let parse_result1 = ecosystem.parse_manifest(content1, &uri).await.unwrap();
1761 let doc_state1 =
1762 DocumentState::new_from_parse_result("cargo", content1.to_string(), parse_result1);
1763 state.update_document(uri.clone(), doc_state1);
1764
1765 {
1767 let mut doc = state.documents.get_mut(&uri).unwrap();
1768 doc.cached_versions
1769 .insert("serde".to_string(), "1.0.210".to_string());
1770 doc.cached_versions
1771 .insert("tokio".to_string(), "1.40.0".to_string());
1772 doc.cached_versions
1773 .insert("anyhow".to_string(), "1.0.89".to_string());
1774 }
1775
1776 let content2 = r#"[dependencies]
1778serde = "1.0"
1779tokio = "1.0"
1780"#;
1781
1782 let old_dep_names: HashSet<String> = ["serde", "tokio", "anyhow"]
1784 .iter()
1785 .map(|s| s.to_string())
1786 .collect();
1787 let new_dep_names: HashSet<String> =
1788 ["serde", "tokio"].iter().map(|s| s.to_string()).collect();
1789 let diff = DependencyDiff::compute(&old_dep_names, &new_dep_names);
1790
1791 let parse_result2 = ecosystem.parse_manifest(content2, &uri).await.unwrap();
1792 let mut doc_state2 =
1793 DocumentState::new_from_parse_result("cargo", content2.to_string(), parse_result2);
1794
1795 if let Some(old_doc) = state.get_document(&uri) {
1796 preserve_cache(&mut doc_state2, &old_doc);
1797 }
1798
1799 for removed_dep in &diff.removed {
1801 doc_state2.cached_versions.remove(removed_dep);
1802 }
1803
1804 state.update_document(uri.clone(), doc_state2);
1805
1806 let doc = state.get_document(&uri).unwrap();
1808 assert_eq!(
1809 doc.cached_versions.len(),
1810 2,
1811 "anyhow should be removed from cache"
1812 );
1813 assert!(doc.cached_versions.contains_key("serde"));
1814 assert!(doc.cached_versions.contains_key("tokio"));
1815 assert!(!doc.cached_versions.contains_key("anyhow"));
1816 }
1817 }
1818}