diff --git a/Cargo.lock b/Cargo.lock index 04ce8ad8c..75a272292 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -593,6 +593,7 @@ dependencies = [ "torrust-tracker-metrics", "torrust-tracker-primitives", "torrust-tracker-test-helpers", + "torrust-tracker-torrent-repository", "tracing", ] @@ -708,6 +709,7 @@ dependencies = [ "torrust-tracker-metrics", "torrust-tracker-primitives", "torrust-tracker-test-helpers", + "torrust-tracker-torrent-repository", "tracing", "zerocopy 0.7.35", ] @@ -4575,6 +4577,7 @@ dependencies = [ "torrust-tracker-events", "torrust-tracker-primitives", "torrust-tracker-test-helpers", + "torrust-tracker-torrent-repository", "tower", "tower-http", "tracing", @@ -4614,6 +4617,7 @@ dependencies = [ "torrust-tracker-metrics", "torrust-tracker-primitives", "torrust-tracker-test-helpers", + "torrust-tracker-torrent-repository", "torrust-udp-tracker-server", "tower", "tower-http", @@ -4666,6 +4670,7 @@ dependencies = [ "torrust-tracker-metrics", "torrust-tracker-primitives", "torrust-tracker-test-helpers", + "torrust-tracker-torrent-repository", "torrust-udp-tracker-server", ] @@ -4713,6 +4718,7 @@ dependencies = [ "torrust-tracker-clock", "torrust-tracker-configuration", "torrust-tracker-test-helpers", + "torrust-tracker-torrent-repository", "torrust-udp-tracker-server", "tracing", "tracing-subscriber", @@ -4851,12 +4857,17 @@ dependencies = [ "bittorrent-primitives", "criterion", "crossbeam-skiplist", + "futures", + "mockall", "rand 0.9.1", "rstest", + "serde", "thiserror 2.0.12", "tokio", "torrust-tracker-clock", "torrust-tracker-configuration", + "torrust-tracker-events", + "torrust-tracker-metrics", "torrust-tracker-primitives", "torrust-tracker-test-helpers", "tracing", @@ -4909,6 +4920,7 @@ dependencies = [ "torrust-tracker-metrics", "torrust-tracker-primitives", "torrust-tracker-test-helpers", + "torrust-tracker-torrent-repository", "tracing", "url", "uuid", diff --git a/Cargo.toml b/Cargo.toml index a15ff78df..219701d03 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -55,6 +55,7 @@ torrust-rest-tracker-api-core = { version = "3.0.0-develop", path = "packages/re torrust-server-lib = { version = "3.0.0-develop", path = "packages/server-lib" } torrust-tracker-clock = { version = "3.0.0-develop", path = "packages/clock" } torrust-tracker-configuration = { version = "3.0.0-develop", path = "packages/configuration" } +torrust-tracker-torrent-repository = { version = "3.0.0-develop", path = "packages/torrent-repository" } torrust-udp-tracker-server = { version = "3.0.0-develop", path = "packages/udp-tracker-server" } tracing = "0" tracing-subscriber = { version = "0", features = ["json"] } diff --git a/packages/axum-http-tracker-server/Cargo.toml b/packages/axum-http-tracker-server/Cargo.toml index 1b4627d41..81831a614 100644 --- a/packages/axum-http-tracker-server/Cargo.toml +++ b/packages/axum-http-tracker-server/Cargo.toml @@ -33,6 +33,7 @@ torrust-server-lib = { version = "3.0.0-develop", path = "../server-lib" } torrust-tracker-clock = { version = "3.0.0-develop", path = "../clock" } torrust-tracker-configuration = { version = "3.0.0-develop", path = "../configuration" } torrust-tracker-primitives = { version = "3.0.0-develop", path = "../primitives" } +torrust-tracker-torrent-repository = { version = "3.0.0-develop", path = "../torrent-repository" } tower = { version = "0", features = ["timeout"] } tower-http = { version = "0", features = ["compression-full", "cors", "propagate-header", "request-id", "trace"] } tracing = "0" diff --git 
a/packages/axum-http-tracker-server/src/environment.rs b/packages/axum-http-tracker-server/src/environment.rs index aeb53a710..10dada2db 100644 --- a/packages/axum-http-tracker-server/src/environment.rs +++ b/packages/axum-http-tracker-server/src/environment.rs @@ -10,6 +10,7 @@ use torrust_axum_server::tsl::make_rust_tls; use torrust_server_lib::registar::Registar; use torrust_tracker_configuration::{logging, Configuration}; use torrust_tracker_primitives::peer; +use torrust_tracker_torrent_repository::container::TorrentRepositoryContainer; use crate::server::{HttpServer, Launcher, Running, Stopped}; @@ -24,12 +25,12 @@ pub struct Environment { impl Environment { /// Add a torrent to the tracker - pub fn add_torrent_peer(&self, info_hash: &InfoHash, peer: &peer::Peer) { - let _number_of_downloads_increased = self - .container + pub async fn add_torrent_peer(&self, info_hash: &InfoHash, peer: &peer::Peer) -> bool { + self.container .tracker_core_container .in_memory_torrent_repository - .upsert_peer(info_hash, peer, None); + .upsert_peer(info_hash, peer, None) + .await } } @@ -143,7 +144,15 @@ impl EnvContainer { .expect("missing HTTP tracker configuration"); let http_tracker_config = Arc::new(http_tracker_config[0].clone()); - let tracker_core_container = Arc::new(TrackerCoreContainer::initialize(&core_config)); + let torrent_repository_container = Arc::new(TorrentRepositoryContainer::initialize( + configuration.core.tracker_usage_statistics.into(), + )); + + let tracker_core_container = Arc::new(TrackerCoreContainer::initialize_from( + &core_config, + &torrent_repository_container, + )); + let http_tracker_container = HttpTrackerCoreContainer::initialize_from_tracker_core(&tracker_core_container, &http_tracker_config); diff --git a/packages/axum-http-tracker-server/src/server.rs b/packages/axum-http-tracker-server/src/server.rs index ff1650b9c..f7d1ed7ea 100644 --- a/packages/axum-http-tracker-server/src/server.rs +++ b/packages/axum-http-tracker-server/src/server.rs @@ -260,6 +260,7 @@ mod tests { use torrust_server_lib::registar::Registar; use torrust_tracker_configuration::{logging, Configuration}; use torrust_tracker_test_helpers::configuration::ephemeral_public; + use torrust_tracker_torrent_repository::container::TorrentRepositoryContainer; use crate::server::{HttpServer, Launcher}; @@ -279,7 +280,7 @@ mod tests { let http_core_broadcaster = Broadcaster::default(); let http_stats_repository = Arc::new(Repository::new()); let http_stats_event_bus = Arc::new(EventBus::new( - configuration.core.tracker_usage_statistics, + configuration.core.tracker_usage_statistics.into(), http_core_broadcaster.clone(), )); @@ -289,7 +290,14 @@ mod tests { let _unused = run_event_listener(http_stats_event_bus.receiver(), &http_stats_repository); } - let tracker_core_container = Arc::new(TrackerCoreContainer::initialize(&core_config)); + let torrent_repository_container = Arc::new(TorrentRepositoryContainer::initialize( + configuration.core.tracker_usage_statistics.into(), + )); + + let tracker_core_container = Arc::new(TrackerCoreContainer::initialize_from( + &core_config, + &torrent_repository_container, + )); let announce_service = Arc::new(AnnounceService::new( tracker_core_container.core_config.clone(), diff --git a/packages/axum-http-tracker-server/src/v1/handlers/announce.rs b/packages/axum-http-tracker-server/src/v1/handlers/announce.rs index 7489211a9..7d7a0b386 100644 --- a/packages/axum-http-tracker-server/src/v1/handlers/announce.rs +++ 
b/packages/axum-http-tracker-server/src/v1/handlers/announce.rs @@ -168,7 +168,7 @@ mod tests { let http_core_broadcaster = Broadcaster::default(); let http_stats_repository = Arc::new(Repository::new()); let http_stats_event_bus = Arc::new(EventBus::new( - config.core.tracker_usage_statistics, + config.core.tracker_usage_statistics.into(), http_core_broadcaster.clone(), )); diff --git a/packages/axum-http-tracker-server/src/v1/handlers/scrape.rs b/packages/axum-http-tracker-server/src/v1/handlers/scrape.rs index 330e7c13e..8decfe95c 100644 --- a/packages/axum-http-tracker-server/src/v1/handlers/scrape.rs +++ b/packages/axum-http-tracker-server/src/v1/handlers/scrape.rs @@ -139,7 +139,7 @@ mod tests { let http_core_broadcaster = Broadcaster::default(); let http_stats_repository = Arc::new(Repository::new()); let http_stats_event_bus = Arc::new(EventBus::new( - config.core.tracker_usage_statistics, + config.core.tracker_usage_statistics.into(), http_core_broadcaster.clone(), )); diff --git a/packages/axum-http-tracker-server/tests/server/v1/contract.rs b/packages/axum-http-tracker-server/tests/server/v1/contract.rs index d1f52d55a..d864ba67c 100644 --- a/packages/axum-http-tracker-server/tests/server/v1/contract.rs +++ b/packages/axum-http-tracker-server/tests/server/v1/contract.rs @@ -474,7 +474,7 @@ mod for_all_config_modes { let previously_announced_peer = PeerBuilder::default().with_peer_id(&PeerId(*b"-qB00000000000000001")).build(); // Add the Peer 1 - env.add_torrent_peer(&info_hash, &previously_announced_peer); + env.add_torrent_peer(&info_hash, &previously_announced_peer).await; // Announce the new Peer 2. This new peer is not included in the response peer list let response = Client::new(*env.bind_address()) @@ -517,7 +517,7 @@ mod for_all_config_modes { .with_peer_id(&PeerId(*b"-qB00000000000000001")) .with_peer_addr(&SocketAddr::new(IpAddr::V4(Ipv4Addr::new(0x69, 0x69, 0x69, 0x69)), 8080)) .build(); - env.add_torrent_peer(&info_hash, &peer_using_ipv4); + env.add_torrent_peer(&info_hash, &peer_using_ipv4).await; // Announce a peer using IPv6 let peer_using_ipv6 = PeerBuilder::default() .with_peer_id(&PeerId(*b"-qB00000000000000002")) .with_peer_addr(&SocketAddr::new( IpAddr::V6(Ipv6Addr::new(0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969)), 8080, )) .build(); - env.add_torrent_peer(&info_hash, &peer_using_ipv6); + env.add_torrent_peer(&info_hash, &peer_using_ipv6).await; // Announce the new Peer.
let response = Client::new(*env.bind_address()) @@ -625,7 +625,7 @@ mod for_all_config_modes { let previously_announced_peer = PeerBuilder::default().with_peer_id(&PeerId(*b"-qB00000000000000001")).build(); // Add the Peer 1 - env.add_torrent_peer(&info_hash, &previously_announced_peer); + env.add_torrent_peer(&info_hash, &previously_announced_peer).await; // Announce the new Peer 2 accepting compact responses let response = Client::new(*env.bind_address()) @@ -666,7 +666,7 @@ mod for_all_config_modes { let previously_announced_peer = PeerBuilder::default().with_peer_id(&PeerId(*b"-qB00000000000000001")).build(); // Add the Peer 1 - env.add_torrent_peer(&info_hash, &previously_announced_peer); + env.add_torrent_peer(&info_hash, &previously_announced_peer).await; // Announce the new Peer 2 without passing the "compact" param // By default it should respond with the compact peer list @@ -787,7 +787,8 @@ mod for_all_config_modes { .container .tracker_core_container .in_memory_torrent_repository - .get_torrent_peers(&info_hash); + .get_torrent_peers(&info_hash) + .await; let peer_addr = peers[0].peer_addr; assert_eq!(peer_addr.ip(), client_ip); @@ -829,7 +830,8 @@ mod for_all_config_modes { .container .tracker_core_container .in_memory_torrent_repository - .get_torrent_peers(&info_hash); + .get_torrent_peers(&info_hash) + .await; let peer_addr = peers[0].peer_addr; assert_eq!( @@ -878,7 +880,8 @@ mod for_all_config_modes { .container .tracker_core_container .in_memory_torrent_repository - .get_torrent_peers(&info_hash); + .get_torrent_peers(&info_hash) + .await; let peer_addr = peers[0].peer_addr; assert_eq!( @@ -925,7 +928,8 @@ mod for_all_config_modes { .container .tracker_core_container .in_memory_torrent_repository - .get_torrent_peers(&info_hash); + .get_torrent_peers(&info_hash) + .await; let peer_addr = peers[0].peer_addr; assert_eq!(peer_addr.ip(), IpAddr::from_str("150.172.238.178").unwrap()); @@ -1010,7 +1014,8 @@ mod for_all_config_modes { .with_peer_id(&PeerId(*b"-qB00000000000000001")) .with_bytes_pending_to_download(1) .build(), - ); + ) + .await; let response = Client::new(*env.bind_address()) .scrape( @@ -1050,7 +1055,8 @@ mod for_all_config_modes { .with_peer_id(&PeerId(*b"-qB00000000000000001")) .with_no_bytes_pending_to_download() .build(), - ); + ) + .await; let response = Client::new(*env.bind_address()) .scrape( @@ -1282,7 +1288,8 @@ mod configured_as_whitelisted { .with_peer_id(&PeerId(*b"-qB00000000000000001")) .with_bytes_pending_to_download(1) .build(), - ); + ) + .await; let response = Client::new(*env.bind_address()) .scrape( @@ -1318,7 +1325,8 @@ mod configured_as_whitelisted { .with_peer_id(&PeerId(*b"-qB00000000000000001")) .with_bytes_pending_to_download(1) .build(), - ); + ) + .await; env.container .tracker_core_container @@ -1494,7 +1502,8 @@ mod configured_as_private { .with_peer_id(&PeerId(*b"-qB00000000000000001")) .with_bytes_pending_to_download(1) .build(), - ); + ) + .await; let response = Client::new(*env.bind_address()) .scrape( @@ -1525,7 +1534,8 @@ mod configured_as_private { .with_peer_id(&PeerId(*b"-qB00000000000000001")) .with_bytes_pending_to_download(1) .build(), - ); + ) + .await; let expiring_key = env .container @@ -1576,7 +1586,8 @@ mod configured_as_private { .with_peer_id(&PeerId(*b"-qB00000000000000001")) .with_bytes_pending_to_download(1) .build(), - ); + ) + .await; let false_key: Key = "YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ".parse().unwrap(); diff --git a/packages/axum-rest-tracker-api-server/Cargo.toml 
b/packages/axum-rest-tracker-api-server/Cargo.toml index d1491c96e..296f77d61 100644 --- a/packages/axum-rest-tracker-api-server/Cargo.toml +++ b/packages/axum-rest-tracker-api-server/Cargo.toml @@ -39,6 +39,7 @@ torrust-tracker-clock = { version = "3.0.0-develop", path = "../clock" } torrust-tracker-configuration = { version = "3.0.0-develop", path = "../configuration" } torrust-tracker-metrics = { version = "3.0.0-develop", path = "../metrics" } torrust-tracker-primitives = { version = "3.0.0-develop", path = "../primitives" } +torrust-tracker-torrent-repository = { version = "3.0.0-develop", path = "../torrent-repository" } torrust-udp-tracker-server = { version = "3.0.0-develop", path = "../udp-tracker-server" } tower = { version = "0", features = ["timeout"] } tower-http = { version = "0", features = ["compression-full", "cors", "propagate-header", "request-id", "trace"] } diff --git a/packages/axum-rest-tracker-api-server/src/environment.rs b/packages/axum-rest-tracker-api-server/src/environment.rs index 275d72574..92ca5a2d1 100644 --- a/packages/axum-rest-tracker-api-server/src/environment.rs +++ b/packages/axum-rest-tracker-api-server/src/environment.rs @@ -12,6 +12,7 @@ use torrust_rest_tracker_api_core::container::TrackerHttpApiCoreContainer; use torrust_server_lib::registar::Registar; use torrust_tracker_configuration::{logging, Configuration}; use torrust_tracker_primitives::peer; +use torrust_tracker_torrent_repository::container::TorrentRepositoryContainer; use torrust_udp_tracker_server::container::UdpTrackerServerContainer; use crate::server::{ApiServer, Launcher, Running, Stopped}; @@ -32,12 +33,12 @@ where S: std::fmt::Debug + std::fmt::Display, { /// Add a torrent to the tracker - pub fn add_torrent_peer(&self, info_hash: &InfoHash, peer: &peer::Peer) { - let _number_of_downloads_increased = self - .container + pub async fn add_torrent_peer(&self, info_hash: &InfoHash, peer: &peer::Peer) -> bool { + self.container .tracker_core_container .in_memory_torrent_repository - .upsert_peer(info_hash, peer, None); + .upsert_peer(info_hash, peer, None) + .await } } @@ -172,14 +173,25 @@ impl EnvContainer { .clone(), ); - let tracker_core_container = Arc::new(TrackerCoreContainer::initialize(&core_config)); + let torrent_repository_container = Arc::new(TorrentRepositoryContainer::initialize( + core_config.tracker_usage_statistics.into(), + )); + + let tracker_core_container = Arc::new(TrackerCoreContainer::initialize_from( + &core_config, + &torrent_repository_container, + )); + let http_tracker_core_container = HttpTrackerCoreContainer::initialize_from_tracker_core(&tracker_core_container, &http_tracker_config); + let udp_tracker_core_container = UdpTrackerCoreContainer::initialize_from_tracker_core(&tracker_core_container, &udp_tracker_config); + let udp_tracker_server_container = UdpTrackerServerContainer::initialize(&core_config); let tracker_http_api_core_container = TrackerHttpApiCoreContainer::initialize_from( + &torrent_repository_container, &tracker_core_container, &http_tracker_core_container, &udp_tracker_core_container, diff --git a/packages/axum-rest-tracker-api-server/src/v1/context/stats/handlers.rs b/packages/axum-rest-tracker-api-server/src/v1/context/stats/handlers.rs index 17d3e4f2d..552958d74 100644 --- a/packages/axum-rest-tracker-api-server/src/v1/context/stats/handlers.rs +++ b/packages/axum-rest-tracker-api-server/src/v1/context/stats/handlers.rs @@ -69,6 +69,7 @@ pub async fn get_metrics_handler( State(state): State<( Arc, Arc>, + Arc, Arc, Arc, Arc, @@ 
-81,6 +82,7 @@ pub async fn get_metrics_handler( state.2.clone(), state.3.clone(), state.4.clone(), + state.5.clone(), ) .await; diff --git a/packages/axum-rest-tracker-api-server/src/v1/context/stats/routes.rs b/packages/axum-rest-tracker-api-server/src/v1/context/stats/routes.rs index c19f08b2a..3eeaa8bf4 100644 --- a/packages/axum-rest-tracker-api-server/src/v1/context/stats/routes.rs +++ b/packages/axum-rest-tracker-api-server/src/v1/context/stats/routes.rs @@ -28,6 +28,7 @@ pub fn add(prefix: &str, router: Router, http_api_container: &Arc Response { match InfoHash::from_str(&info_hash.0) { Err(_) => invalid_info_hash_param_response(&info_hash.0), - Ok(info_hash) => match get_torrent_info(&in_memory_torrent_repository, &info_hash) { + Ok(info_hash) => match get_torrent_info(&in_memory_torrent_repository, &info_hash).await { Some(info) => torrent_info_response(info).into_response(), None => torrent_not_known_response(), }, @@ -85,14 +85,19 @@ pub async fn get_torrents_handler( tracing::debug!("pagination: {:?}", pagination); if pagination.0.info_hashes.is_empty() { - torrent_list_response(&get_torrents_page( - &in_memory_torrent_repository, - Some(&Pagination::new_with_options(pagination.0.offset, pagination.0.limit)), - )) + torrent_list_response( + &get_torrents_page( + &in_memory_torrent_repository, + Some(&Pagination::new_with_options(pagination.0.offset, pagination.0.limit)), + ) + .await, + ) .into_response() } else { match parse_info_hashes(pagination.0.info_hashes) { - Ok(info_hashes) => torrent_list_response(&get_torrents(&in_memory_torrent_repository, &info_hashes)).into_response(), + Ok(info_hashes) => { + torrent_list_response(&get_torrents(&in_memory_torrent_repository, &info_hashes).await).into_response() + } Err(err) => match err { QueryParamError::InvalidInfoHash { info_hash } => invalid_info_hash_param_response(&info_hash), }, diff --git a/packages/axum-rest-tracker-api-server/tests/server/v1/contract/context/stats.rs b/packages/axum-rest-tracker-api-server/tests/server/v1/contract/context/stats.rs index 51a4804e7..7cae0abbf 100644 --- a/packages/axum-rest-tracker-api-server/tests/server/v1/contract/context/stats.rs +++ b/packages/axum-rest-tracker-api-server/tests/server/v1/contract/context/stats.rs @@ -21,7 +21,8 @@ async fn should_allow_getting_tracker_statistics() { env.add_torrent_peer( &InfoHash::from_str("9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d").unwrap(), // DevSkim: ignore DS173237 &PeerBuilder::default().into(), - ); + ) + .await; let request_id = Uuid::new_v4(); diff --git a/packages/axum-rest-tracker-api-server/tests/server/v1/contract/context/torrent.rs b/packages/axum-rest-tracker-api-server/tests/server/v1/contract/context/torrent.rs index 42421db99..ae9819785 100644 --- a/packages/axum-rest-tracker-api-server/tests/server/v1/contract/context/torrent.rs +++ b/packages/axum-rest-tracker-api-server/tests/server/v1/contract/context/torrent.rs @@ -26,7 +26,7 @@ async fn should_allow_getting_all_torrents() { let info_hash = InfoHash::from_str("9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d").unwrap(); // DevSkim: ignore DS173237 - env.add_torrent_peer(&info_hash, &PeerBuilder::default().into()); + env.add_torrent_peer(&info_hash, &PeerBuilder::default().into()).await; let request_id = Uuid::new_v4(); @@ -59,8 +59,8 @@ async fn should_allow_limiting_the_torrents_in_the_result() { let info_hash_1 = InfoHash::from_str("9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d").unwrap(); // DevSkim: ignore DS173237 let info_hash_2 = 
InfoHash::from_str("0b3aea4adc213ce32295be85d3883a63bca25446").unwrap(); // DevSkim: ignore DS173237 - env.add_torrent_peer(&info_hash_1, &PeerBuilder::default().into()); - env.add_torrent_peer(&info_hash_2, &PeerBuilder::default().into()); + env.add_torrent_peer(&info_hash_1, &PeerBuilder::default().into()).await; + env.add_torrent_peer(&info_hash_2, &PeerBuilder::default().into()).await; let request_id = Uuid::new_v4(); @@ -96,8 +96,8 @@ async fn should_allow_the_torrents_result_pagination() { let info_hash_1 = InfoHash::from_str("9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d").unwrap(); // DevSkim: ignore DS173237 let info_hash_2 = InfoHash::from_str("0b3aea4adc213ce32295be85d3883a63bca25446").unwrap(); // DevSkim: ignore DS173237 - env.add_torrent_peer(&info_hash_1, &PeerBuilder::default().into()); - env.add_torrent_peer(&info_hash_2, &PeerBuilder::default().into()); + env.add_torrent_peer(&info_hash_1, &PeerBuilder::default().into()).await; + env.add_torrent_peer(&info_hash_2, &PeerBuilder::default().into()).await; let request_id = Uuid::new_v4(); @@ -132,8 +132,8 @@ async fn should_allow_getting_a_list_of_torrents_providing_infohashes() { let info_hash_1 = InfoHash::from_str("9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d").unwrap(); // DevSkim: ignore DS173237 let info_hash_2 = InfoHash::from_str("0b3aea4adc213ce32295be85d3883a63bca25446").unwrap(); // DevSkim: ignore DS173237 - env.add_torrent_peer(&info_hash_1, &PeerBuilder::default().into()); - env.add_torrent_peer(&info_hash_2, &PeerBuilder::default().into()); + env.add_torrent_peer(&info_hash_1, &PeerBuilder::default().into()).await; + env.add_torrent_peer(&info_hash_2, &PeerBuilder::default().into()).await; let request_id = Uuid::new_v4(); @@ -307,7 +307,7 @@ async fn should_allow_getting_a_torrent_info() { let peer = PeerBuilder::default().into(); - env.add_torrent_peer(&info_hash, &peer); + env.add_torrent_peer(&info_hash, &peer).await; let request_id = Uuid::new_v4(); @@ -389,7 +389,7 @@ async fn should_not_allow_getting_a_torrent_info_for_unauthenticated_users() { let info_hash = InfoHash::from_str("9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d").unwrap(); // DevSkim: ignore DS173237 - env.add_torrent_peer(&info_hash, &PeerBuilder::default().into()); + env.add_torrent_peer(&info_hash, &PeerBuilder::default().into()).await; let request_id = Uuid::new_v4(); diff --git a/packages/events/src/bus.rs b/packages/events/src/bus.rs index d53f29b8d..b42fb4fc5 100644 --- a/packages/events/src/bus.rs +++ b/packages/events/src/bus.rs @@ -3,36 +3,60 @@ use std::sync::Arc; use crate::broadcaster::Broadcaster; use crate::{receiver, sender}; +#[derive(Clone, Debug)] +pub enum SenderStatus { + Enabled, + Disabled, +} + +impl From<bool> for SenderStatus { + fn from(enabled: bool) -> Self { + if enabled { + Self::Enabled + } else { + Self::Disabled + } + } +} + +impl From<SenderStatus> for bool { + fn from(sender_status: SenderStatus) -> Self { + match sender_status { + SenderStatus::Enabled => true, + SenderStatus::Disabled => false, + } + } +} + #[derive(Clone, Debug)] pub struct EventBus { - pub enable_sender: bool, + pub sender_status: SenderStatus, pub broadcaster: Broadcaster, } impl Default for EventBus { fn default() -> Self { - let enable_sender = true; + let sender_status = SenderStatus::Enabled; let broadcaster = Broadcaster::::default(); - Self::new(enable_sender, broadcaster) + Self::new(sender_status, broadcaster) } } impl EventBus { #[must_use] - pub fn new(enable_sender: bool, broadcaster: Broadcaster) -> Self { + pub fn new(sender_status: SenderStatus, 
broadcaster: Broadcaster) -> Self { Self { - enable_sender, + sender_status, broadcaster, } } #[must_use] pub fn sender(&self) -> Option>> { - if self.enable_sender { - Some(Arc::new(self.broadcaster.clone())) - } else { - None + match self.sender_status { + SenderStatus::Enabled => Some(Arc::new(self.broadcaster.clone())), + SenderStatus::Disabled => None, } } @@ -50,14 +74,14 @@ mod tests { #[tokio::test] async fn it_should_provide_an_event_sender_when_enabled() { - let bus = EventBus::::new(true, Broadcaster::default()); + let bus = EventBus::::new(SenderStatus::Enabled, Broadcaster::default()); assert!(bus.sender().is_some()); } #[tokio::test] async fn it_should_not_provide_event_sender_when_disabled() { - let bus = EventBus::::new(false, Broadcaster::default()); + let bus = EventBus::::new(SenderStatus::Disabled, Broadcaster::default()); assert!(bus.sender().is_none()); } diff --git a/packages/events/src/sender.rs b/packages/events/src/sender.rs index 9fc77f650..3dccade4c 100644 --- a/packages/events/src/sender.rs +++ b/packages/events/src/sender.rs @@ -1,4 +1,5 @@ use std::fmt; +use std::fmt::Debug; use futures::future::BoxFuture; #[cfg(test)] diff --git a/packages/http-tracker-core/Cargo.toml b/packages/http-tracker-core/Cargo.toml index 5473c5a25..37b540e39 100644 --- a/packages/http-tracker-core/Cargo.toml +++ b/packages/http-tracker-core/Cargo.toml @@ -28,6 +28,7 @@ torrust-tracker-configuration = { version = "3.0.0-develop", path = "../configur torrust-tracker-events = { version = "3.0.0-develop", path = "../events" } torrust-tracker-metrics = { version = "3.0.0-develop", path = "../metrics" } torrust-tracker-primitives = { version = "3.0.0-develop", path = "../primitives" } +torrust-tracker-torrent-repository = { version = "3.0.0-develop", path = "../torrent-repository" } tracing = "0" [dev-dependencies] diff --git a/packages/http-tracker-core/benches/helpers/util.rs b/packages/http-tracker-core/benches/helpers/util.rs index 7ee91a2c4..cfb3f745f 100644 --- a/packages/http-tracker-core/benches/helpers/util.rs +++ b/packages/http-tracker-core/benches/helpers/util.rs @@ -62,7 +62,7 @@ pub fn initialize_core_tracker_services_with_config(config: &Configuration) -> ( let http_core_broadcaster = Broadcaster::default(); let http_stats_repository = Arc::new(Repository::new()); let http_stats_event_bus = Arc::new(EventBus::new( - config.core.tracker_usage_statistics, + config.core.tracker_usage_statistics.into(), http_core_broadcaster.clone(), )); diff --git a/packages/http-tracker-core/src/container.rs b/packages/http-tracker-core/src/container.rs index 681d4a4f4..f063c0061 100644 --- a/packages/http-tracker-core/src/container.rs +++ b/packages/http-tracker-core/src/container.rs @@ -2,6 +2,7 @@ use std::sync::Arc; use bittorrent_tracker_core::container::TrackerCoreContainer; use torrust_tracker_configuration::{Core, HttpTracker}; +use torrust_tracker_torrent_repository::container::TorrentRepositoryContainer; use crate::event::bus::EventBus; use crate::event::sender::Broadcaster; @@ -26,7 +27,15 @@ pub struct HttpTrackerCoreContainer { impl HttpTrackerCoreContainer { #[must_use] pub fn initialize(core_config: &Arc, http_tracker_config: &Arc) -> Arc { - let tracker_core_container = Arc::new(TrackerCoreContainer::initialize(core_config)); + let torrent_repository_container = Arc::new(TorrentRepositoryContainer::initialize( + core_config.tracker_usage_statistics.into(), + )); + + let tracker_core_container = Arc::new(TrackerCoreContainer::initialize_from( + core_config, + 
&torrent_repository_container, + )); + Self::initialize_from_tracker_core(&tracker_core_container, http_tracker_config) } @@ -36,6 +45,7 @@ impl HttpTrackerCoreContainer { http_tracker_config: &Arc, ) -> Arc { let http_tracker_core_services = HttpTrackerCoreServices::initialize_from(tracker_core_container); + Self::initialize_from_services(tracker_core_container, &http_tracker_core_services, http_tracker_config) } @@ -72,7 +82,7 @@ impl HttpTrackerCoreServices { let http_core_broadcaster = Broadcaster::default(); let http_stats_repository = Arc::new(Repository::new()); let http_stats_event_bus = Arc::new(EventBus::new( - tracker_core_container.core_config.tracker_usage_statistics, + tracker_core_container.core_config.tracker_usage_statistics.into(), http_core_broadcaster.clone(), )); diff --git a/packages/http-tracker-core/src/services/announce.rs b/packages/http-tracker-core/src/services/announce.rs index e0f387273..9f39a04e4 100644 --- a/packages/http-tracker-core/src/services/announce.rs +++ b/packages/http-tracker-core/src/services/announce.rs @@ -256,7 +256,7 @@ mod tests { let http_core_broadcaster = Broadcaster::default(); let http_stats_repository = Arc::new(Repository::new()); let http_stats_event_bus = Arc::new(EventBus::new( - config.core.tracker_usage_statistics, + config.core.tracker_usage_statistics.into(), http_core_broadcaster.clone(), )); diff --git a/packages/http-tracker-core/src/services/scrape.rs b/packages/http-tracker-core/src/services/scrape.rs index 70e30099c..3da1aa88f 100644 --- a/packages/http-tracker-core/src/services/scrape.rs +++ b/packages/http-tracker-core/src/services/scrape.rs @@ -255,6 +255,7 @@ mod tests { use bittorrent_http_tracker_protocol::v1::services::peer_ip_resolver::{ClientIpSources, RemoteClientAddr, ResolvedIp}; use bittorrent_tracker_core::announce_handler::PeersWanted; use mockall::predicate::eq; + use torrust_tracker_events::bus::SenderStatus; use torrust_tracker_primitives::core::ScrapeData; use torrust_tracker_primitives::service_binding::{Protocol, ServiceBinding}; use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; @@ -276,7 +277,7 @@ mod tests { // HTTP core stats let http_core_broadcaster = Broadcaster::default(); - let http_stats_event_bus = Arc::new(EventBus::new(false, http_core_broadcaster.clone())); + let http_stats_event_bus = Arc::new(EventBus::new(SenderStatus::Disabled, http_core_broadcaster.clone())); let http_stats_event_sender = http_stats_event_bus.sender(); @@ -446,6 +447,7 @@ mod tests { use bittorrent_http_tracker_protocol::v1::services::peer_ip_resolver::{ClientIpSources, RemoteClientAddr, ResolvedIp}; use bittorrent_tracker_core::announce_handler::PeersWanted; use mockall::predicate::eq; + use torrust_tracker_events::bus::SenderStatus; use torrust_tracker_primitives::core::ScrapeData; use torrust_tracker_primitives::service_binding::{Protocol, ServiceBinding}; use torrust_tracker_test_helpers::configuration; @@ -468,7 +470,7 @@ mod tests { // HTTP core stats let http_core_broadcaster = Broadcaster::default(); - let http_stats_event_bus = Arc::new(EventBus::new(false, http_core_broadcaster.clone())); + let http_stats_event_bus = Arc::new(EventBus::new(SenderStatus::Disabled, http_core_broadcaster.clone())); let http_stats_event_sender = http_stats_event_bus.sender(); diff --git a/packages/http-tracker-core/src/statistics/event/handler.rs b/packages/http-tracker-core/src/statistics/event/handler.rs index 8d2ad1aa2..f5506f6e3 100644 --- a/packages/http-tracker-core/src/statistics/event/handler.rs +++ 
b/packages/http-tracker-core/src/statistics/event/handler.rs @@ -9,10 +9,6 @@ use crate::event::Event; use crate::statistics::repository::Repository; use crate::statistics::HTTP_TRACKER_CORE_REQUESTS_RECEIVED_TOTAL; -/// # Panics -/// -/// This function panics if the client IP address is not the same as the IP -/// version of the event. pub async fn handle_event(event: Event, stats_repository: &Arc<Repository>, now: DurationSinceUnixEpoch) { match event { Event::TcpAnnounce { connection, .. } => { diff --git a/packages/http-tracker-core/src/statistics/services.rs b/packages/http-tracker-core/src/statistics/services.rs index 1c5890ea8..af1e30524 100644 --- a/packages/http-tracker-core/src/statistics/services.rs +++ b/packages/http-tracker-core/src/statistics/services.rs @@ -47,7 +47,7 @@ pub async fn get_metrics( in_memory_torrent_repository: Arc<InMemoryTorrentRepository>, stats_repository: Arc<Repository>, ) -> TrackerMetrics { - let torrents_metrics = in_memory_torrent_repository.get_aggregate_swarm_metadata(); + let torrents_metrics = in_memory_torrent_repository.get_aggregate_swarm_metadata().await; let stats = stats_repository.get_stats().await; TrackerMetrics { @@ -96,7 +96,7 @@ mod tests { let http_core_broadcaster = Broadcaster::default(); let http_stats_repository = Arc::new(Repository::new()); let http_stats_event_bus = Arc::new(EventBus::new( - config.core.tracker_usage_statistics, + config.core.tracker_usage_statistics.into(), http_core_broadcaster.clone(), )); diff --git a/packages/metrics/src/metric_collection.rs b/packages/metrics/src/metric_collection.rs index 438f3b03a..83b08f178 100644 --- a/packages/metrics/src/metric_collection.rs +++ b/packages/metrics/src/metric_collection.rs @@ -140,7 +140,12 @@ impl MetricCollection { /// /// Return an error if a metric of a different type with the same name /// already exists. - pub fn increase_gauge(&mut self, name: &MetricName, label_set: &LabelSet, time: DurationSinceUnixEpoch) -> Result<(), Error> { + pub fn increment_gauge( + &mut self, + name: &MetricName, + label_set: &LabelSet, + time: DurationSinceUnixEpoch, + ) -> Result<(), Error> { if self.counters.metrics.contains_key(name) { return Err(Error::MetricNameCollisionAdding { metric_name: name.clone(), @@ -156,7 +161,12 @@ impl MetricCollection { /// /// Return an error if a metric of a different type with the same name /// already exists. - pub fn decrease_gauge(&mut self, name: &MetricName, label_set: &LabelSet, time: DurationSinceUnixEpoch) -> Result<(), Error> { + pub fn decrement_gauge( + &mut self, + name: &MetricName, + label_set: &LabelSet, + time: DurationSinceUnixEpoch, + ) -> Result<(), Error> { if self.counters.metrics.contains_key(name) { return Err(Error::MetricNameCollisionAdding { metric_name: name.clone(), diff --git a/packages/primitives/src/peer.rs b/packages/primitives/src/peer.rs index bd753b220..57ca3909d 100644 --- a/packages/primitives/src/peer.rs +++ b/packages/primitives/src/peer.rs @@ -22,8 +22,10 @@ //! }; //! ``` +use std::fmt; use std::net::{IpAddr, SocketAddr}; use std::ops::{Deref, DerefMut}; +use std::str::FromStr; use std::sync::Arc; use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes, PeerId}; @@ -34,6 +36,57 @@ use crate::DurationSinceUnixEpoch; pub type PeerAnnouncement = Peer; +#[derive(Debug, Serialize, Copy, Clone, PartialEq, Eq, Hash)] +#[serde(rename_all = "lowercase")] +pub enum PeerRole { + Seeder, + Leecher, +} + +impl PeerRole { + /// Returns the opposite role: Seeder becomes Leecher, and vice versa.
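+    ///
+    /// A doc-test sketch added for illustration (the path
+    /// `torrust_tracker_primitives::peer::PeerRole` matches the imports used
+    /// elsewhere in this diff):
+    ///
+    /// ```
+    /// use torrust_tracker_primitives::peer::PeerRole;
+    ///
+    /// assert_eq!(PeerRole::Seeder.opposite(), PeerRole::Leecher);
+    /// assert_eq!(PeerRole::Leecher.opposite(), PeerRole::Seeder);
+    /// ```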
+ #[must_use] + pub fn opposite(self) -> Self { + match self { + PeerRole::Seeder => PeerRole::Leecher, + PeerRole::Leecher => PeerRole::Seeder, + } + } +} + +impl fmt::Display for PeerRole { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match self { + PeerRole::Seeder => write!(f, "seeder"), + PeerRole::Leecher => write!(f, "leecher"), + } + } +} + +impl FromStr for PeerRole { + type Err = ParsePeerRoleError; + + fn from_str(s: &str) -> Result { + match s.to_lowercase().as_str() { + "seeder" => Ok(PeerRole::Seeder), + "leecher" => Ok(PeerRole::Leecher), + _ => Err(ParsePeerRoleError::InvalidPeerRole { + location: Location::caller(), + raw_param: s.to_string(), + }), + } + } +} + +#[derive(Error, Debug)] +pub enum ParsePeerRoleError { + #[error("invalid param {raw_param} in {location}")] + InvalidPeerRole { + location: &'static Location<'static>, + raw_param: String, + }, +} + /// Peer struct used by the core `Tracker`. /// /// A sample peer: @@ -147,6 +200,7 @@ impl PartialOrd for Peer { pub trait ReadInfo { fn is_seeder(&self) -> bool; + fn is_leecher(&self) -> bool; fn get_event(&self) -> AnnounceEvent; fn get_id(&self) -> PeerId; fn get_updated(&self) -> DurationSinceUnixEpoch; @@ -158,6 +212,10 @@ impl ReadInfo for Peer { self.left.0.get() <= 0 && self.event != AnnounceEvent::Stopped } + fn is_leecher(&self) -> bool { + !self.is_seeder() + } + fn get_event(&self) -> AnnounceEvent { self.event } @@ -180,6 +238,10 @@ impl ReadInfo for Arc { self.left.0.get() <= 0 && self.event != AnnounceEvent::Stopped } + fn is_leecher(&self) -> bool { + !self.is_seeder() + } + fn get_event(&self) -> AnnounceEvent { self.event } @@ -203,6 +265,25 @@ impl Peer { self.left.0.get() <= 0 && self.event != AnnounceEvent::Stopped } + #[must_use] + pub fn is_leecher(&self) -> bool { + !self.is_seeder() + } + + #[must_use] + pub fn is_completed(&self) -> bool { + self.event == AnnounceEvent::Completed + } + + #[must_use] + pub fn role(&self) -> PeerRole { + if self.is_seeder() { + PeerRole::Seeder + } else { + PeerRole::Leecher + } + } + pub fn ip(&mut self) -> IpAddr { self.peer_addr.ip() } @@ -210,6 +291,26 @@ impl Peer { pub fn change_ip(&mut self, new_ip: &IpAddr) { self.peer_addr = SocketAddr::new(*new_ip, self.peer_addr.port()); } + + pub fn mark_as_completed(&mut self) { + self.event = AnnounceEvent::Completed; + } + + #[must_use] + pub fn into_completed(self) -> Self { + Self { + event: AnnounceEvent::Completed, + ..self + } + } + + #[must_use] + pub fn into_seeder(self) -> Self { + Self { + left: NumberOfBytes::new(0), + ..self + } + } } use std::panic::Location; @@ -478,6 +579,12 @@ pub mod fixture { self } + #[must_use] + pub fn with_event(mut self, event: AnnounceEvent) -> Self { + self.peer.event = event; + self + } + #[allow(dead_code)] #[must_use] pub fn build(self) -> Peer { diff --git a/packages/rest-tracker-api-core/Cargo.toml b/packages/rest-tracker-api-core/Cargo.toml index 0077572fb..de1946239 100644 --- a/packages/rest-tracker-api-core/Cargo.toml +++ b/packages/rest-tracker-api-core/Cargo.toml @@ -21,6 +21,7 @@ tokio = { version = "1", features = ["macros", "net", "rt-multi-thread", "signal torrust-tracker-configuration = { version = "3.0.0-develop", path = "../configuration" } torrust-tracker-metrics = { version = "3.0.0-develop", path = "../metrics" } torrust-tracker-primitives = { version = "3.0.0-develop", path = "../primitives" } +torrust-tracker-torrent-repository = { version = "3.0.0-develop", path = "../torrent-repository" } torrust-udp-tracker-server = { 
version = "3.0.0-develop", path = "../udp-tracker-server" } [dev-dependencies] diff --git a/packages/rest-tracker-api-core/src/container.rs b/packages/rest-tracker-api-core/src/container.rs index ec3786dfb..1c4a08e26 100644 --- a/packages/rest-tracker-api-core/src/container.rs +++ b/packages/rest-tracker-api-core/src/container.rs @@ -7,12 +7,22 @@ use bittorrent_udp_tracker_core::services::banning::BanService; use bittorrent_udp_tracker_core::{self}; use tokio::sync::RwLock; use torrust_tracker_configuration::{Core, HttpApi, HttpTracker, UdpTracker}; +use torrust_tracker_torrent_repository::container::TorrentRepositoryContainer; use torrust_udp_tracker_server::container::UdpTrackerServerContainer; pub struct TrackerHttpApiCoreContainer { pub http_api_config: Arc, + + // Torrent repository + pub torrent_repository_container: Arc, + + // Tracker core pub tracker_core_container: Arc, + + // HTTP tracker core pub http_stats_repository: Arc, + + // UDP tracker core pub ban_service: Arc>, pub udp_core_stats_repository: Arc, pub udp_server_stats_repository: Arc, @@ -26,14 +36,25 @@ impl TrackerHttpApiCoreContainer { udp_tracker_config: &Arc, http_api_config: &Arc, ) -> Arc { - let tracker_core_container = Arc::new(TrackerCoreContainer::initialize(core_config)); + let torrent_repository_container = Arc::new(TorrentRepositoryContainer::initialize( + core_config.tracker_usage_statistics.into(), + )); + + let tracker_core_container = Arc::new(TrackerCoreContainer::initialize_from( + core_config, + &torrent_repository_container, + )); + let http_tracker_core_container = HttpTrackerCoreContainer::initialize_from_tracker_core(&tracker_core_container, http_tracker_config); + let udp_tracker_core_container = UdpTrackerCoreContainer::initialize_from_tracker_core(&tracker_core_container, udp_tracker_config); + let udp_tracker_server_container = UdpTrackerServerContainer::initialize(core_config); Self::initialize_from( + &torrent_repository_container, &tracker_core_container, &http_tracker_core_container, &udp_tracker_core_container, @@ -44,6 +65,7 @@ impl TrackerHttpApiCoreContainer { #[must_use] pub fn initialize_from( + torrent_repository_container: &Arc, tracker_core_container: &Arc, http_tracker_core_container: &Arc, udp_tracker_core_container: &Arc, @@ -51,16 +73,21 @@ impl TrackerHttpApiCoreContainer { http_api_config: &Arc, ) -> Arc { Arc::new(TrackerHttpApiCoreContainer { + http_api_config: http_api_config.clone(), + + // Torrent repository + torrent_repository_container: torrent_repository_container.clone(), + + // Tracker core tracker_core_container: tracker_core_container.clone(), + // HTTP tracker core http_stats_repository: http_tracker_core_container.stats_repository.clone(), + // UDP tracker core ban_service: udp_tracker_core_container.ban_service.clone(), udp_core_stats_repository: udp_tracker_core_container.stats_repository.clone(), - udp_server_stats_repository: udp_tracker_server_container.stats_repository.clone(), - - http_api_config: http_api_config.clone(), }) } } diff --git a/packages/rest-tracker-api-core/src/statistics/services.rs b/packages/rest-tracker-api-core/src/statistics/services.rs index 8d5b7514a..d05a35981 100644 --- a/packages/rest-tracker-api-core/src/statistics/services.rs +++ b/packages/rest-tracker-api-core/src/statistics/services.rs @@ -32,7 +32,7 @@ pub async fn get_metrics( http_stats_repository: Arc, udp_server_stats_repository: Arc, ) -> TrackerMetrics { - let torrents_metrics = in_memory_torrent_repository.get_aggregate_swarm_metadata(); + let torrents_metrics = 
in_memory_torrent_repository.get_aggregate_swarm_metadata().await; let udp_banned_ips_total = ban_service.read().await.get_banned_ips_total(); let http_stats = http_stats_repository.get_stats().await; let udp_server_stats = udp_server_stats_repository.get_stats().await; @@ -93,6 +93,7 @@ pub struct TrackerLabeledMetrics { pub async fn get_labeled_metrics( in_memory_torrent_repository: Arc<InMemoryTorrentRepository>, ban_service: Arc<RwLock<BanService>>, + swarms_stats_repository: Arc, http_stats_repository: Arc, udp_stats_repository: Arc, udp_server_stats_repository: Arc, @@ -100,12 +101,17 @@ pub async fn get_labeled_metrics( let _torrents_metrics = in_memory_torrent_repository.get_aggregate_swarm_metadata().await; let _udp_banned_ips_total = ban_service.read().await.get_banned_ips_total(); + let swarms_stats = swarms_stats_repository.get_metrics().await; let http_stats = http_stats_repository.get_stats().await; let udp_stats_repository = udp_stats_repository.get_stats().await; let udp_server_stats = udp_server_stats_repository.get_stats().await; // Merge all the metrics into a single collection let mut metrics = MetricCollection::default(); + + metrics + .merge(&swarms_stats.metric_collection) + .expect("failed to merge torrent repository metrics"); metrics .merge(&http_stats.metric_collection) .expect("msg: failed to merge HTTP core metrics"); @@ -154,7 +160,7 @@ mod tests { let http_core_broadcaster = Broadcaster::default(); let http_stats_repository = Arc::new(Repository::new()); let http_stats_event_bus = Arc::new(EventBus::new( - config.core.tracker_usage_statistics, + config.core.tracker_usage_statistics.into(), http_core_broadcaster.clone(), )); diff --git a/packages/torrent-repository/Cargo.toml b/packages/torrent-repository/Cargo.toml index 3396cd961..98ae5817d 100644 --- a/packages/torrent-repository/Cargo.toml +++ b/packages/torrent-repository/Cargo.toml @@ -19,16 +19,21 @@ version.workspace = true aquatic_udp_protocol = "0" bittorrent-primitives = "0.1.0" crossbeam-skiplist = "0" +futures = "0" +serde = { version = "1.0.219", features = ["derive"] } thiserror = "2.0.12" tokio = { version = "1", features = ["macros", "net", "rt-multi-thread", "signal", "sync"] } torrust-tracker-clock = { version = "3.0.0-develop", path = "../clock" } torrust-tracker-configuration = { version = "3.0.0-develop", path = "../configuration" } +torrust-tracker-events = { version = "3.0.0-develop", path = "../events" } +torrust-tracker-metrics = { version = "3.0.0-develop", path = "../metrics" } torrust-tracker-primitives = { version = "3.0.0-develop", path = "../primitives" } tracing = "0" [dev-dependencies] async-std = { version = "1", features = ["attributes", "tokio1"] } criterion = { version = "0", features = ["async_tokio"] } +mockall = "0" rand = "0" rstest = "0" torrust-tracker-test-helpers = { version = "3.0.0-develop", path = "../test-helpers" } diff --git a/packages/torrent-repository/src/container.rs b/packages/torrent-repository/src/container.rs new file mode 100644 index 000000000..d185180b1 --- /dev/null +++ b/packages/torrent-repository/src/container.rs @@ -0,0 +1,38 @@ +use std::sync::Arc; + +use torrust_tracker_events::bus::SenderStatus; + +use crate::event::bus::EventBus; +use crate::event::sender::Broadcaster; +use crate::event::{self}; +use crate::statistics::repository::Repository; +use crate::{statistics, Swarms}; + +pub struct TorrentRepositoryContainer { + pub swarms: Arc<Swarms>, + pub event_bus: Arc<EventBus>, + pub stats_event_sender: event::sender::Sender, + pub stats_repository: Arc<Repository>, +} + +impl TorrentRepositoryContainer { 
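+    /// A usage sketch added for illustration (`SenderStatus` comes from
+    /// `torrust_tracker_events::bus`, as imported above; marked `ignore`
+    /// because whether this compiles as a doc-test in this crate is an
+    /// assumption):
+    ///
+    /// ```ignore
+    /// use torrust_tracker_events::bus::SenderStatus;
+    /// use torrust_tracker_torrent_repository::container::TorrentRepositoryContainer;
+    ///
+    /// // Enabled: the bus hands out a sender, so swarm changes emit events.
+    /// let container = TorrentRepositoryContainer::initialize(SenderStatus::Enabled);
+    /// assert!(container.stats_event_sender.is_some());
+    ///
+    /// // Disabled: no sender is created and statistics stay silent.
+    /// let container = TorrentRepositoryContainer::initialize(SenderStatus::Disabled);
+    /// assert!(container.stats_event_sender.is_none());
+    /// ```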
#[must_use] + pub fn initialize(sender_status: SenderStatus) -> Self { + // Torrent repository stats + let broadcaster = Broadcaster::default(); + let stats_repository = Arc::new(Repository::new()); + + let event_bus = Arc::new(EventBus::new(sender_status, broadcaster.clone())); + + let stats_event_sender = event_bus.sender(); + + let swarms = Arc::new(Swarms::new(stats_event_sender.clone())); + + Self { + swarms, + event_bus, + stats_event_sender, + stats_repository, + } + } +} diff --git a/packages/torrent-repository/src/event.rs b/packages/torrent-repository/src/event.rs new file mode 100644 index 000000000..65a65ce8c --- /dev/null +++ b/packages/torrent-repository/src/event.rs @@ -0,0 +1,111 @@ +use bittorrent_primitives::info_hash::InfoHash; +use torrust_tracker_primitives::peer::{Peer, PeerAnnouncement}; + +#[derive(Debug, PartialEq, Eq, Clone)] +pub enum Event { + TorrentAdded { + info_hash: InfoHash, + announcement: PeerAnnouncement, + }, + TorrentRemoved { + info_hash: InfoHash, + }, + PeerAdded { + info_hash: InfoHash, + peer: Peer, + }, + PeerRemoved { + info_hash: InfoHash, + peer: Peer, + }, + PeerUpdated { + info_hash: InfoHash, + old_peer: Peer, + new_peer: Peer, + }, + PeerDownloadCompleted { + info_hash: InfoHash, + peer: Peer, + }, +} + +pub mod sender { + use std::sync::Arc; + + use super::Event; + + pub type Sender = Option<Arc<dyn torrust_tracker_events::sender::Sender<Event = Event>>>; + pub type Broadcaster = torrust_tracker_events::broadcaster::Broadcaster<Event>; + + #[cfg(test)] + pub mod tests { + + use futures::future::{self, BoxFuture}; + use mockall::mock; + use mockall::predicate::eq; + use torrust_tracker_events::sender::{SendError, Sender}; + + use crate::event::Event; + + mock! { + pub EventSender {} + + impl Sender for EventSender { + type Event = Event; + + fn send(&self, event: Event) -> BoxFuture<'static, Option<Result<usize, SendError>>>; + } + } + + pub fn expect_event(mock: &mut MockEventSender, event: Event) { + mock.expect_send() + .with(eq(event)) + .times(1) + .returning(|_| Box::pin(future::ready(Some(Ok(1))))); + } + + pub fn expect_event_sequence(mock: &mut MockEventSender, event: Vec<Event>) { + for e in event { + expect_event(mock, e); + } + } + } +} + +pub mod receiver { + use super::Event; + + pub type Receiver = Box<dyn torrust_tracker_events::receiver::Receiver<Event = Event>>; +} + +pub mod bus { + use crate::event::Event; + + pub type EventBus = torrust_tracker_events::bus::EventBus<Event>; +} + +#[cfg(test)] +pub mod test { + + use torrust_tracker_primitives::peer::Peer; + + use super::Event; + use crate::tests::sample_info_hash; + + #[test] + fn events_should_be_comparable() { + let info_hash = sample_info_hash(); + + let event1 = Event::TorrentAdded { + info_hash, + announcement: Peer::default(), + }; + + let event2 = Event::TorrentRemoved { info_hash }; + + let event1_clone = event1.clone(); + + assert!(event1 == event1_clone); + assert!(event1 != event2); + } +} diff --git a/packages/torrent-repository/src/lib.rs b/packages/torrent-repository/src/lib.rs index a4e7d9c5d..3adf2f18d 100644 --- a/packages/torrent-repository/src/lib.rs +++ b/packages/torrent-repository/src/lib.rs @@ -1,8 +1,12 @@ +pub mod container; +pub mod event; +pub mod statistics; pub mod swarm; pub mod swarms; -use std::sync::{Arc, Mutex, MutexGuard}; +use std::sync::Arc; +use tokio::sync::Mutex; use torrust_tracker_clock::clock; pub type Swarms = swarms::Swarms; @@ -19,15 +23,7 @@ pub(crate) type CurrentClock = clock::Working; #[allow(dead_code)] pub(crate) type CurrentClock = clock::Stopped; -pub trait LockTrackedTorrent { - fn lock_or_panic(&self) -> MutexGuard<'_, Swarm>; -} - -impl LockTrackedTorrent for SwarmHandle { 
fn lock_or_panic(&self) -> MutexGuard<'_, Swarm> { - self.lock().expect("can't acquire lock for tracked torrent handle") - } -} +pub const TORRENT_REPOSITORY_LOG_TARGET: &str = "TORRENT_REPOSITORY"; #[cfg(test)] pub(crate) mod tests { diff --git a/packages/torrent-repository/src/statistics/event/handler.rs b/packages/torrent-repository/src/statistics/event/handler.rs new file mode 100644 index 000000000..2b61839b8 --- /dev/null +++ b/packages/torrent-repository/src/statistics/event/handler.rs @@ -0,0 +1,434 @@ +use std::sync::Arc; + +use torrust_tracker_metrics::label::{LabelSet, LabelValue}; +use torrust_tracker_metrics::{label_name, metric_name}; +use torrust_tracker_primitives::peer::Peer; +use torrust_tracker_primitives::DurationSinceUnixEpoch; + +use crate::event::Event; +use crate::statistics::repository::Repository; +use crate::statistics::{ + TORRENT_REPOSITORY_PEER_CONNECTIONS_TOTAL, TORRENT_REPOSITORY_TORRENTS_DOWNLOADS_TOTAL, TORRENT_REPOSITORY_TORRENTS_TOTAL, +}; + +pub async fn handle_event(event: Event, stats_repository: &Arc<Repository>, now: DurationSinceUnixEpoch) { + match event { + // Torrent events + Event::TorrentAdded { info_hash, .. } => { + tracing::debug!(info_hash = ?info_hash, "Torrent added",); + + let _unused = stats_repository + .increment_gauge(&metric_name!(TORRENT_REPOSITORY_TORRENTS_TOTAL), &LabelSet::default(), now) + .await; + } + Event::TorrentRemoved { info_hash } => { + tracing::debug!(info_hash = ?info_hash, "Torrent removed",); + + let _unused = stats_repository + .decrement_gauge(&metric_name!(TORRENT_REPOSITORY_TORRENTS_TOTAL), &LabelSet::default(), now) + .await; + } + + // Peer events + Event::PeerAdded { info_hash, peer } => { + tracing::debug!(info_hash = ?info_hash, peer = ?peer, "Peer added", ); + + let _unused = stats_repository + .increment_gauge( + &metric_name!(TORRENT_REPOSITORY_PEER_CONNECTIONS_TOTAL), + &label_set_for_peer(&peer), + now, + ) + .await; + } + Event::PeerRemoved { info_hash, peer } => { + tracing::debug!(info_hash = ?info_hash, peer = ?peer, "Peer removed", ); + + let _unused = stats_repository + .decrement_gauge( + &metric_name!(TORRENT_REPOSITORY_PEER_CONNECTIONS_TOTAL), + &label_set_for_peer(&peer), + now, + ) + .await; + } + Event::PeerUpdated { + info_hash, + old_peer, + new_peer, + } => { + tracing::debug!(info_hash = ?info_hash, old_peer = ?old_peer, new_peer = ?new_peer, "Peer updated", ); + + if old_peer.role() != new_peer.role() { + let _unused = stats_repository + .increment_gauge( + &metric_name!(TORRENT_REPOSITORY_PEER_CONNECTIONS_TOTAL), + &label_set_for_peer(&new_peer), + now, + ) + .await; + + let _unused = stats_repository + .decrement_gauge( + &metric_name!(TORRENT_REPOSITORY_PEER_CONNECTIONS_TOTAL), + &label_set_for_peer(&old_peer), + now, + ) + .await; + } + } + Event::PeerDownloadCompleted { info_hash, peer } => { + tracing::debug!(info_hash = ?info_hash, peer = ?peer, "Peer download completed", ); + + let _unused = stats_repository + .increment_counter( + &metric_name!(TORRENT_REPOSITORY_TORRENTS_DOWNLOADS_TOTAL), + &label_set_for_peer(&peer), + now, + ) + .await; + } + } +} + +/// Returns the label set to be included in the metrics for the given peer.
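+///
+/// For example, a seeder maps to the label set `peer_role=seeder` and a
+/// leecher to `peer_role=leecher`, so seeders and leechers are tracked as
+/// separate metric series.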
+fn label_set_for_peer(peer: &Peer) -> LabelSet { + if peer.is_seeder() { + (label_name!("peer_role"), LabelValue::new("seeder")).into() + } else { + (label_name!("peer_role"), LabelValue::new("leecher")).into() + } +} + +#[cfg(test)] +mod tests { + use std::sync::Arc; + + use aquatic_udp_protocol::NumberOfBytes; + use torrust_tracker_metrics::label::LabelSet; + use torrust_tracker_metrics::metric::MetricName; + use torrust_tracker_primitives::peer::{Peer, PeerRole}; + + use crate::statistics::repository::Repository; + use crate::tests::{leecher, seeder}; + + fn make_peer(role: PeerRole) -> Peer { + match role { + PeerRole::Seeder => seeder(), + PeerRole::Leecher => leecher(), + } + } + + // It returns a peer with the opposite role of the given peer. + fn make_opposite_role_peer(peer: &Peer) -> Peer { + let mut opposite_role_peer = *peer; + + match peer.role() { + PeerRole::Seeder => { + opposite_role_peer.left = NumberOfBytes::new(1); + } + PeerRole::Leecher => { + opposite_role_peer.left = NumberOfBytes::new(0); + } + } + + opposite_role_peer + } + + async fn expect_counter_metric_to_be( + stats_repository: &Arc, + metric_name: &MetricName, + label_set: &LabelSet, + expected_value: u64, + ) { + let value = get_counter_metric(stats_repository, metric_name, label_set).await; + assert_eq!(value.to_string(), expected_value.to_string()); + } + + async fn get_counter_metric(stats_repository: &Arc, metric_name: &MetricName, label_set: &LabelSet) -> u64 { + stats_repository + .get_metrics() + .await + .metric_collection + .get_counter_value(metric_name, label_set) + .unwrap_or_else(|| panic!("Failed to get counter value for metric name '{metric_name}' and label set '{label_set}'")) + .value() + } + + async fn expect_gauge_metric_to_be( + stats_repository: &Arc, + metric_name: &MetricName, + label_set: &LabelSet, + expected_value: f64, + ) { + let value = get_gauge_metric(stats_repository, metric_name, label_set).await; + assert_eq!(value.to_string(), expected_value.to_string()); + } + + async fn get_gauge_metric(stats_repository: &Arc, metric_name: &MetricName, label_set: &LabelSet) -> f64 { + stats_repository + .get_metrics() + .await + .metric_collection + .get_gauge_value(metric_name, label_set) + .unwrap_or_else(|| panic!("Failed to get gauge value for metric name '{metric_name}' and label set '{label_set}'")) + .value() + } + + mod for_torrent_metrics { + + use std::sync::Arc; + + use torrust_tracker_clock::clock::stopped::Stopped; + use torrust_tracker_clock::clock::{self, Time}; + use torrust_tracker_metrics::label::LabelSet; + use torrust_tracker_metrics::metric_name; + + use crate::event::Event; + use crate::statistics::event::handler::handle_event; + use crate::statistics::event::handler::tests::expect_gauge_metric_to_be; + use crate::statistics::repository::Repository; + use crate::statistics::TORRENT_REPOSITORY_TORRENTS_TOTAL; + use crate::tests::{sample_info_hash, sample_peer}; + use crate::CurrentClock; + + #[tokio::test] + async fn it_should_increment_the_number_of_torrents_when_a_torrent_added_event_is_received() { + clock::Stopped::local_set_to_unix_epoch(); + + let stats_repository = Arc::new(Repository::new()); + + handle_event( + Event::TorrentAdded { + info_hash: sample_info_hash(), + announcement: sample_peer(), + }, + &stats_repository, + CurrentClock::now(), + ) + .await; + + expect_gauge_metric_to_be( + &stats_repository, + &metric_name!(TORRENT_REPOSITORY_TORRENTS_TOTAL), + &LabelSet::default(), + 1.0, + ) + .await; + } + + #[tokio::test] + async fn 
it_should_decrement_the_number_of_torrents_when_a_torrent_removed_event_is_received() { + clock::Stopped::local_set_to_unix_epoch(); + + let stats_repository = Arc::new(Repository::new()); + let metric_name = metric_name!(TORRENT_REPOSITORY_TORRENTS_TOTAL); + let label_set = LabelSet::default(); + + // Increment the gauge first to simulate a torrent being added. + stats_repository + .increment_gauge(&metric_name, &label_set, CurrentClock::now()) + .await + .unwrap(); + + handle_event( + Event::TorrentRemoved { + info_hash: sample_info_hash(), + }, + &stats_repository, + CurrentClock::now(), + ) + .await; + + expect_gauge_metric_to_be(&stats_repository, &metric_name, &label_set, 0.0).await; + } + } + + mod for_peer_metrics { + + mod peer_connections_total { + + use std::sync::Arc; + + use rstest::rstest; + use torrust_tracker_clock::clock::stopped::Stopped; + use torrust_tracker_clock::clock::{self, Time}; + use torrust_tracker_metrics::label::LabelValue; + use torrust_tracker_metrics::{label_name, metric_name}; + use torrust_tracker_primitives::peer::PeerRole; + + use crate::event::Event; + use crate::statistics::event::handler::handle_event; + use crate::statistics::event::handler::tests::{ + expect_gauge_metric_to_be, get_gauge_metric, make_opposite_role_peer, make_peer, + }; + use crate::statistics::repository::Repository; + use crate::statistics::TORRENT_REPOSITORY_PEER_CONNECTIONS_TOTAL; + use crate::tests::sample_info_hash; + use crate::CurrentClock; + + #[rstest] + #[case("seeder")] + #[case("leecher")] + #[tokio::test] + async fn it_should_increment_the_number_of_peer_connections_when_a_peer_added_event_is_received( + #[case] role: PeerRole, + ) { + clock::Stopped::local_set_to_unix_epoch(); + + let peer = make_peer(role); + + let stats_repository = Arc::new(Repository::new()); + let metric_name = metric_name!(TORRENT_REPOSITORY_PEER_CONNECTIONS_TOTAL); + let label_set = (label_name!("peer_role"), LabelValue::new(&role.to_string())).into(); + + handle_event( + Event::PeerAdded { + info_hash: sample_info_hash(), + peer, + }, + &stats_repository, + CurrentClock::now(), + ) + .await; + + expect_gauge_metric_to_be(&stats_repository, &metric_name, &label_set, 1.0).await; + } + + #[rstest] + #[case("seeder")] + #[case("leecher")] + #[tokio::test] + async fn it_should_decrement_the_number_of_peer_connections_when_a_peer_removed_event_is_received( + #[case] role: PeerRole, + ) { + clock::Stopped::local_set_to_unix_epoch(); + + let peer = make_peer(role); + + let stats_repository = Arc::new(Repository::new()); + + let metric_name = metric_name!(TORRENT_REPOSITORY_PEER_CONNECTIONS_TOTAL); + let label_set = (label_name!("peer_role"), LabelValue::new(&role.to_string())).into(); + + // Increment the gauge first to simulate a peer being added. 
+ stats_repository + .increment_gauge(&metric_name, &label_set, CurrentClock::now()) + .await + .unwrap(); + + handle_event( + Event::PeerRemoved { + info_hash: sample_info_hash(), + peer, + }, + &stats_repository, + CurrentClock::now(), + ) + .await; + + expect_gauge_metric_to_be(&stats_repository, &metric_name, &label_set, 0.0).await; + } + + #[rstest] + #[case("seeder")] + #[case("leecher")] + #[tokio::test] + async fn it_should_adjust_the_number_of_seeders_and_leechers_when_a_peer_updated_event_is_received_and_the_peer_changed_its_role( + #[case] old_role: PeerRole, + ) { + clock::Stopped::local_set_to_unix_epoch(); + + let stats_repository = Arc::new(Repository::new()); + + let old_peer = make_peer(old_role); + let new_peer = make_opposite_role_peer(&old_peer); + + let metric_name = metric_name!(TORRENT_REPOSITORY_PEER_CONNECTIONS_TOTAL); + let old_role_label_set = (label_name!("peer_role"), LabelValue::new(&old_peer.role().to_string())).into(); + let new_role_label_set = (label_name!("peer_role"), LabelValue::new(&new_peer.role().to_string())).into(); + + // Increment the gauge first by simulating a peer was added. + handle_event( + Event::PeerAdded { + info_hash: sample_info_hash(), + peer: old_peer, + }, + &stats_repository, + CurrentClock::now(), + ) + .await; + + let old_role_total = get_gauge_metric(&stats_repository, &metric_name, &old_role_label_set).await; + let new_role_total = 0.0; + + // The peer's role has changed, so we need to increment the new + // role and decrement the old one. + handle_event( + Event::PeerUpdated { + info_hash: sample_info_hash(), + old_peer, + new_peer, + }, + &stats_repository, + CurrentClock::now(), + ) + .await; + + // The peer's role has changed, so the new role has incremented. + expect_gauge_metric_to_be(&stats_repository, &metric_name, &new_role_label_set, new_role_total + 1.0).await; + + // And the old role has decremented. 
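+ // (one less than it was right after the peer was added).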
+ expect_gauge_metric_to_be(&stats_repository, &metric_name, &old_role_label_set, old_role_total - 1.0).await; + } + } + + mod torrent_downloads_total { + + use std::sync::Arc; + + use rstest::rstest; + use torrust_tracker_clock::clock::stopped::Stopped; + use torrust_tracker_clock::clock::{self, Time}; + use torrust_tracker_metrics::label::LabelValue; + use torrust_tracker_metrics::{label_name, metric_name}; + use torrust_tracker_primitives::peer::PeerRole; + + use crate::event::Event; + use crate::statistics::event::handler::handle_event; + use crate::statistics::event::handler::tests::{expect_counter_metric_to_be, make_peer}; + use crate::statistics::repository::Repository; + use crate::statistics::TORRENT_REPOSITORY_TORRENTS_DOWNLOADS_TOTAL; + use crate::tests::sample_info_hash; + use crate::CurrentClock; + + #[rstest] + #[case("seeder")] + #[case("leecher")] + #[tokio::test] + async fn it_should_increment_the_number_of_downloads_when_a_peer_downloaded_event_is_received( + #[case] role: PeerRole, + ) { + clock::Stopped::local_set_to_unix_epoch(); + + let peer = make_peer(role); + + let stats_repository = Arc::new(Repository::new()); + let metric_name = metric_name!(TORRENT_REPOSITORY_TORRENTS_DOWNLOADS_TOTAL); + let label_set = (label_name!("peer_role"), LabelValue::new(&role.to_string())).into(); + + handle_event( + Event::PeerDownloadCompleted { + info_hash: sample_info_hash(), + peer, + }, + &stats_repository, + CurrentClock::now(), + ) + .await; + + expect_counter_metric_to_be(&stats_repository, &metric_name, &label_set, 1).await; + } + } + } +} diff --git a/packages/torrent-repository/src/statistics/event/listener.rs b/packages/torrent-repository/src/statistics/event/listener.rs new file mode 100644 index 000000000..f3b534332 --- /dev/null +++ b/packages/torrent-repository/src/statistics/event/listener.rs @@ -0,0 +1,57 @@ +use std::sync::Arc; + +use tokio::task::JoinHandle; + use torrust_tracker_clock::clock::Time; +use torrust_tracker_events::receiver::RecvError; + +use super::handler::handle_event; +use crate::event::receiver::Receiver; +use crate::statistics::repository::Repository; +use crate::{CurrentClock, TORRENT_REPOSITORY_LOG_TARGET}; + +#[must_use] +pub fn run_event_listener(receiver: Receiver, repository: &Arc<Repository>) -> JoinHandle<()> { + let stats_repository = repository.clone(); + + tracing::info!(target: TORRENT_REPOSITORY_LOG_TARGET, "Starting torrent repository event listener"); + + tokio::spawn(async move { + dispatch_events(receiver, stats_repository).await; + + tracing::info!(target: TORRENT_REPOSITORY_LOG_TARGET, "Torrent repository listener finished"); + }) +} + +async fn dispatch_events(mut receiver: Receiver, stats_repository: Arc<Repository>) { + let shutdown_signal = tokio::signal::ctrl_c(); + + tokio::pin!(shutdown_signal); + + loop { + tokio::select! 
{ + biased; + + _ = &mut shutdown_signal => { + tracing::info!(target: TORRENT_REPOSITORY_LOG_TARGET, "Received Ctrl+C, shutting down torrent repository event listener."); + break; + } + + result = receiver.recv() => { + match result { + Ok(event) => handle_event(event, &stats_repository, CurrentClock::now()).await, + Err(e) => { + match e { + RecvError::Closed => { + tracing::info!(target: TORRENT_REPOSITORY_LOG_TARGET, "Torrent repository event receiver closed."); + break; + } + RecvError::Lagged(n) => { + tracing::warn!(target: TORRENT_REPOSITORY_LOG_TARGET, "Torrent repository event receiver lagged by {} events.", n); + } + } + } + } + } + } + } +} diff --git a/packages/torrent-repository/src/statistics/event/mod.rs b/packages/torrent-repository/src/statistics/event/mod.rs new file mode 100644 index 000000000..dae683398 --- /dev/null +++ b/packages/torrent-repository/src/statistics/event/mod.rs @@ -0,0 +1,2 @@ +pub mod handler; +pub mod listener; diff --git a/packages/torrent-repository/src/statistics/metrics.rs b/packages/torrent-repository/src/statistics/metrics.rs new file mode 100644 index 000000000..f8ab3f9d9 --- /dev/null +++ b/packages/torrent-repository/src/statistics/metrics.rs @@ -0,0 +1,63 @@ +use serde::Serialize; +use torrust_tracker_metrics::label::LabelSet; +use torrust_tracker_metrics::metric::MetricName; +use torrust_tracker_metrics::metric_collection::{Error, MetricCollection}; +use torrust_tracker_primitives::DurationSinceUnixEpoch; + +/// Metrics collected by the torrent repository. +#[derive(Debug, Clone, PartialEq, Default, Serialize)] +pub struct Metrics { + /// A collection of metrics. + pub metric_collection: MetricCollection, +} + +impl Metrics { + /// # Errors + /// + /// Returns an error if the metric does not exist and it cannot be created. + pub fn increment_counter( + &mut self, + metric_name: &MetricName, + labels: &LabelSet, + now: DurationSinceUnixEpoch, + ) -> Result<(), Error> { + self.metric_collection.increase_counter(metric_name, labels, now) + } + + /// # Errors + /// + /// Returns an error if the metric does not exist and it cannot be created. + pub fn set_gauge( + &mut self, + metric_name: &MetricName, + labels: &LabelSet, + value: f64, + now: DurationSinceUnixEpoch, + ) -> Result<(), Error> { + self.metric_collection.set_gauge(metric_name, labels, value, now) + } + + /// # Errors + /// + /// Returns an error if the metric does not exist and it cannot be created. + pub fn increment_gauge( + &mut self, + metric_name: &MetricName, + labels: &LabelSet, + now: DurationSinceUnixEpoch, + ) -> Result<(), Error> { + self.metric_collection.increment_gauge(metric_name, labels, now) + } + + /// # Errors + /// + /// Returns an error if the metric does not exist and it cannot be created. 
+ pub fn decrement_gauge( + &mut self, + metric_name: &MetricName, + labels: &LabelSet, + now: DurationSinceUnixEpoch, + ) -> Result<(), Error> { + self.metric_collection.decrement_gauge(metric_name, labels, now) + } +} diff --git a/packages/torrent-repository/src/statistics/mod.rs b/packages/torrent-repository/src/statistics/mod.rs new file mode 100644 index 000000000..18dcf83ea --- /dev/null +++ b/packages/torrent-repository/src/statistics/mod.rs @@ -0,0 +1,57 @@ +pub mod event; +pub mod metrics; +pub mod repository; + +use metrics::Metrics; +use torrust_tracker_metrics::metric::description::MetricDescription; +use torrust_tracker_metrics::metric_name; +use torrust_tracker_metrics::unit::Unit; + +// Torrent metrics + +const TORRENT_REPOSITORY_TORRENTS_TOTAL: &str = "torrent_repository_torrents_total"; +const TORRENT_REPOSITORY_TORRENTS_DOWNLOADS_TOTAL: &str = "torrent_repository_torrents_downloads_total"; + +// Peers metrics + +const TORRENT_REPOSITORY_PEER_CONNECTIONS_TOTAL: &str = "torrent_repository_peer_connections_total"; +const TORRENT_REPOSITORY_UNIQUE_PEERS_TOTAL: &str = "torrent_repository_unique_peers_total"; // todo: not implemented yet + +#[must_use] +pub fn describe_metrics() -> Metrics { + let mut metrics = Metrics::default(); + + // Torrent metrics + + metrics.metric_collection.describe_gauge( + &metric_name!(TORRENT_REPOSITORY_TORRENTS_TOTAL), + Some(Unit::Count), + Some(&MetricDescription::new("The total number of torrents.")), + ); + + metrics.metric_collection.describe_counter( + &metric_name!(TORRENT_REPOSITORY_TORRENTS_DOWNLOADS_TOTAL), + Some(Unit::Count), + Some(&MetricDescription::new( + "The total number of torrent downloads (since the tracker process started).", + )), + ); + + // Peers metrics + + metrics.metric_collection.describe_gauge( + &metric_name!(TORRENT_REPOSITORY_PEER_CONNECTIONS_TOTAL), + Some(Unit::Count), + Some(&MetricDescription::new( + "The total number of peer connections (one connection per torrent).", + )), + ); + + metrics.metric_collection.describe_gauge( + &metric_name!(TORRENT_REPOSITORY_UNIQUE_PEERS_TOTAL), + Some(Unit::Count), + Some(&MetricDescription::new("The total number of unique peers.")), + ); + + metrics +} diff --git a/packages/torrent-repository/src/statistics/repository.rs b/packages/torrent-repository/src/statistics/repository.rs new file mode 100644 index 000000000..1e376faf7 --- /dev/null +++ b/packages/torrent-repository/src/statistics/repository.rs @@ -0,0 +1,107 @@ +use std::sync::Arc; + +use tokio::sync::{RwLock, RwLockReadGuard}; +use torrust_tracker_metrics::label::LabelSet; +use torrust_tracker_metrics::metric::MetricName; +use torrust_tracker_metrics::metric_collection::Error; +use torrust_tracker_primitives::DurationSinceUnixEpoch; + +use super::describe_metrics; +use super::metrics::Metrics; + +/// A repository for the torrent repository metrics. +#[derive(Clone)] +pub struct Repository { + pub stats: Arc<RwLock<Metrics>>, +} + +impl Default for Repository { + fn default() -> Self { + Self::new() + } +} + +impl Repository { + #[must_use] + pub fn new() -> Self { + let stats = Arc::new(RwLock::new(describe_metrics())); + + Self { stats } + } + + pub async fn get_metrics(&self) -> RwLockReadGuard<'_, Metrics> { + self.stats.read().await + } + + /// # Errors + /// + /// This function will return an error if the metric collection fails to + /// increment the counter. 
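+ /// + /// The error is also logged here before it is returned to the caller.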
+ pub async fn increment_counter( + &self, + metric_name: &MetricName, + labels: &LabelSet, + now: DurationSinceUnixEpoch, + ) -> Result<(), Error> { + let mut stats_lock = self.stats.write().await; + + let result = stats_lock.increment_counter(metric_name, labels, now); + + drop(stats_lock); + + match result { + Ok(()) => {} + Err(ref err) => tracing::error!("Failed to increment the counter: {}", err), + } + + result + } + + /// # Errors + /// + /// This function will return an error if the metric collection fails to + /// increment the gauge. + pub async fn increment_gauge( + &self, + metric_name: &MetricName, + labels: &LabelSet, + now: DurationSinceUnixEpoch, + ) -> Result<(), Error> { + let mut stats_lock = self.stats.write().await; + + let result = stats_lock.increment_gauge(metric_name, labels, now); + + drop(stats_lock); + + match result { + Ok(()) => {} + Err(ref err) => tracing::error!("Failed to increment the gauge: {}", err), + } + + result + } + + /// # Errors + /// + /// This function will return an error if the metric collection fails to + /// decrement the gauge. + pub async fn decrement_gauge( + &self, + metric_name: &MetricName, + labels: &LabelSet, + now: DurationSinceUnixEpoch, + ) -> Result<(), Error> { + let mut stats_lock = self.stats.write().await; + + let result = stats_lock.decrement_gauge(metric_name, labels, now); + + drop(stats_lock); + + match result { + Ok(()) => {} + Err(ref err) => tracing::error!("Failed to decrement the gauge: {}", err), + } + + result + } +} diff --git a/packages/torrent-repository/src/swarm.rs b/packages/torrent-repository/src/swarm.rs index 4437ca410..8cf2982e6 100644 --- a/packages/torrent-repository/src/swarm.rs +++ b/packages/torrent-repository/src/swarm.rs @@ -1,127 +1,86 @@ //! A swarm is a collection of peers that are all trying to download the same //! torrent. 
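+//! +//! Each swarm keeps its peer list, the aggregated swarm metadata (seeders, +//! leechers and downloads) and an optional event sender used to notify +//! subscribers when peers are added, updated or removed.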
use std::collections::BTreeMap; +use std::fmt::Debug; +use std::hash::{Hash, Hasher}; use std::net::SocketAddr; use std::sync::Arc; use aquatic_udp_protocol::AnnounceEvent; +use bittorrent_primitives::info_hash::InfoHash; use torrust_tracker_configuration::TrackerPolicy; use torrust_tracker_primitives::peer::{self, Peer, PeerAnnouncement}; use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; use torrust_tracker_primitives::DurationSinceUnixEpoch; -#[derive(Clone, Debug, Default, PartialEq, Eq, PartialOrd, Ord, Hash)] +use crate::event::sender::Sender; +use crate::event::Event; + +#[derive(Clone)] pub struct Swarm { + info_hash: InfoHash, peers: BTreeMap<SocketAddr, Arc<PeerAnnouncement>>, metadata: SwarmMetadata, + event_sender: Sender, +} + +#[allow(clippy::missing_fields_in_debug)] +impl Debug for Swarm { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + f.debug_struct("Swarm") + .field("peers", &self.peers) + .field("metadata", &self.metadata) + .finish() + } +} + +impl Hash for Swarm { + fn hash<H: Hasher>(&self, state: &mut H) { + self.peers.hash(state); + self.metadata.hash(state); + } +} + +impl PartialEq for Swarm { + fn eq(&self, other: &Self) -> bool { + self.peers == other.peers && self.metadata == other.metadata + } } +impl Eq for Swarm {} + impl Swarm { #[must_use] - pub fn new(downloaded: u32) -> Self { + pub fn new(info_hash: &InfoHash, downloaded: u32, event_sender: Sender) -> Self { Self { + info_hash: *info_hash, peers: BTreeMap::new(), metadata: SwarmMetadata::new(downloaded, 0, 0), + event_sender, } } - pub fn handle_announcement(&mut self, incoming_announce: &PeerAnnouncement) -> bool { + pub async fn handle_announcement(&mut self, incoming_announce: &PeerAnnouncement) -> bool { let mut downloads_increased: bool = false; let _previous_peer = match peer::ReadInfo::get_event(incoming_announce) { AnnounceEvent::Started | AnnounceEvent::None | AnnounceEvent::Completed => { - self.upsert_peer(Arc::new(*incoming_announce), &mut downloads_increased) + self.upsert_peer(Arc::new(*incoming_announce), &mut downloads_increased).await } - AnnounceEvent::Stopped => self.remove(incoming_announce), + AnnounceEvent::Stopped => self.remove_peer(&incoming_announce.peer_addr).await, }; downloads_increased } - pub fn upsert_peer(&mut self, incoming_announce: Arc<PeerAnnouncement>, downloads_increased: &mut bool) -> Option<Arc<PeerAnnouncement>> { - let is_now_seeder = incoming_announce.is_seeder(); - let has_completed = incoming_announce.event == AnnounceEvent::Completed; - - if let Some(old_announce) = self.peers.insert(incoming_announce.peer_addr, incoming_announce) { - // A peer has been updated in the swarm. - - // Check if the peer has changed its from leecher to seeder or vice versa. - if old_announce.is_seeder() != is_now_seeder { - if is_now_seeder { - self.metadata.complete += 1; - self.metadata.incomplete -= 1; - } else { - self.metadata.complete -= 1; - self.metadata.incomplete += 1; - } - } - - // Check if the peer has completed downloading the torrent. - if has_completed && old_announce.event != AnnounceEvent::Completed { - self.metadata.downloaded += 1; - *downloads_increased = true; - } - - Some(old_announce) - } else { - // A new peer has been added to the swarm. - - // Check if the peer is a seeder or a leecher. - if is_now_seeder { - self.metadata.complete += 1; - } else { - self.metadata.incomplete += 1; - } + pub async fn remove_inactive(&mut self, current_cutoff: DurationSinceUnixEpoch) -> usize { + let peers_to_remove = self.inactive_peers(current_cutoff); - // Check if the peer has completed downloading the torrent. 
- if has_completed { - // Don't increment `downloaded` here: we only count transitions - // from a known peer - } - - None + for peer_addr in &peers_to_remove { + self.remove_peer(peer_addr).await; } - } - - pub fn remove(&mut self, peer_to_remove: &Peer) -> Option<Arc<PeerAnnouncement>> { - match self.peers.remove(&peer_to_remove.peer_addr) { - Some(old_peer) => { - // A peer has been removed from the swarm. - - // Check if the peer was a seeder or a leecher. - if old_peer.is_seeder() { - self.metadata.complete -= 1; - } else { - self.metadata.incomplete -= 1; - } - - Some(old_peer) - } - None => None, - } - } - - pub fn remove_inactive(&mut self, current_cutoff: DurationSinceUnixEpoch) -> u64 { - let mut inactive_peers_removed = 0; - self.peers.retain(|_, peer| { - let is_active = peer::ReadInfo::get_updated(peer) > current_cutoff; - - if !is_active { - // Update the metadata when removing a peer. - if peer.is_seeder() { - self.metadata.complete -= 1; - } else { - self.metadata.incomplete -= 1; - } - - inactive_peers_removed += 1; - } - - is_active - }); - - inactive_peers_removed + peers_to_remove.len() } #[must_use] @@ -208,6 +167,57 @@ impl Swarm { !self.should_be_removed(policy) } + async fn upsert_peer( + &mut self, + incoming_announce: Arc<PeerAnnouncement>, + downloads_increased: &mut bool, + ) -> Option<Arc<PeerAnnouncement>> { + let announcement = incoming_announce.clone(); + + if let Some(previous_announce) = self.peers.insert(incoming_announce.peer_addr, incoming_announce) { + *downloads_increased = self.update_metadata_on_update(&previous_announce, &announcement); + + self.trigger_peer_updated_event(&previous_announce, &announcement).await; + + if *downloads_increased { + self.trigger_peer_download_completed_event(&announcement).await; + } + + Some(previous_announce) + } else { + *downloads_increased = false; + + self.update_metadata_on_insert(&announcement); + + self.trigger_peer_added_event(&announcement).await; + + None + } + } + + async fn remove_peer(&mut self, peer_addr: &SocketAddr) -> Option<Arc<PeerAnnouncement>> { + if let Some(old_peer) = self.peers.remove(peer_addr) { + self.update_metadata_on_removal(&old_peer); + + self.trigger_peer_removed_event(&old_peer).await; + + Some(old_peer) + } else { + None + } + } + + #[must_use] + fn inactive_peers(&self, current_cutoff: DurationSinceUnixEpoch) -> Vec<SocketAddr> { + self.peers + .iter() + .filter(|(_, peer)| peer::ReadInfo::get_updated(&**peer) <= current_cutoff) + .map(|(addr, _)| *addr) + .collect() + } + + /// Returns true if the swarm should be removed according to the retention + /// policy. 
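+ /// + /// A swarm qualifies for removal when the policy removes peerless torrents + /// and the swarm is empty, unless its download count has to be kept for + /// completed-download statistics.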
fn should_be_removed(&self, policy: &TrackerPolicy) -> bool { // If the policy is to remove peerless torrents and the swarm is empty (no peers), (policy.remove_peerless_torrents && self.is_empty()) @@ -217,6 +227,92 @@ impl Swarm { // See https://github.com/torrust/torrust-tracker/issues/1502) && !(policy.persistent_torrent_completed_stat && self.metadata().downloaded > 0) } + + fn update_metadata_on_insert(&mut self, added_peer: &Arc<PeerAnnouncement>) { + if added_peer.is_seeder() { + self.metadata.complete += 1; + } else { + self.metadata.incomplete += 1; + } + } + + fn update_metadata_on_removal(&mut self, removed_peer: &Arc<PeerAnnouncement>) { + if removed_peer.is_seeder() { + self.metadata.complete -= 1; + } else { + self.metadata.incomplete -= 1; + } + } + + fn update_metadata_on_update( + &mut self, + previous_announce: &Arc<PeerAnnouncement>, + new_announce: &Arc<PeerAnnouncement>, + ) -> bool { + let mut downloads_increased = false; + + if previous_announce.role() != new_announce.role() { + if new_announce.is_seeder() { + self.metadata.complete += 1; + self.metadata.incomplete -= 1; + } else { + self.metadata.complete -= 1; + self.metadata.incomplete += 1; + } + } + + if new_announce.is_completed() && !previous_announce.is_completed() { + self.metadata.downloaded += 1; + downloads_increased = true; + } + + downloads_increased + } + + async fn trigger_peer_added_event(&self, announcement: &Arc<PeerAnnouncement>) { + if let Some(event_sender) = self.event_sender.as_deref() { + event_sender + .send(Event::PeerAdded { + info_hash: self.info_hash, + peer: *announcement.clone(), + }) + .await; + } + } + + async fn trigger_peer_removed_event(&self, old_peer: &Arc<PeerAnnouncement>) { + if let Some(event_sender) = self.event_sender.as_deref() { + event_sender + .send(Event::PeerRemoved { + info_hash: self.info_hash, + peer: *old_peer.clone(), + }) + .await; + } + } + + async fn trigger_peer_updated_event(&self, old_announce: &Arc<PeerAnnouncement>, new_announce: &Arc<PeerAnnouncement>) { + if let Some(event_sender) = self.event_sender.as_deref() { + event_sender + .send(Event::PeerUpdated { + info_hash: self.info_hash, + old_peer: *old_announce.clone(), + new_peer: *new_announce.clone(), + }) + .await; + } + } + + async fn trigger_peer_download_completed_event(&self, new_announce: &Arc<PeerAnnouncement>) { + if let Some(event_sender) = self.event_sender.as_deref() { + event_sender + .send(Event::PeerDownloadCompleted { + info_hash: self.info_hash, + peer: *new_announce.clone(), + }) + .await; + } + } } #[cfg(test)] @@ -231,167 +327,181 @@ mod tests { use torrust_tracker_primitives::DurationSinceUnixEpoch; use crate::swarm::Swarm; + use crate::tests::sample_info_hash; + + #[test] + fn it_should_allow_debugging() { + let swarm = Swarm::new(&sample_info_hash(), 0, None); + + assert_eq!( + format!("{swarm:?}"), + "Swarm { peers: {}, metadata: SwarmMetadata { downloaded: 0, complete: 0, incomplete: 0 } }" + ); + } #[test] fn it_should_be_empty_when_no_peers_have_been_inserted() { - let swarm = Swarm::default(); + let swarm = Swarm::new(&sample_info_hash(), 0, None); assert!(swarm.is_empty()); } #[test] fn it_should_have_zero_length_when_no_peers_have_been_inserted() { - let swarm = Swarm::default(); + let swarm = Swarm::new(&sample_info_hash(), 0, None); assert_eq!(swarm.len(), 0); } - #[test] - fn it_should_allow_inserting_a_new_peer() { - let mut swarm = Swarm::default(); + #[tokio::test] + async fn it_should_allow_inserting_a_new_peer() { + let mut swarm = Swarm::new(&sample_info_hash(), 0, None); let mut downloads_increased = false; let peer = PeerBuilder::default().build(); - assert_eq!(swarm.upsert_peer(peer.into(), &mut downloads_increased), 
None); + assert_eq!(swarm.upsert_peer(peer.into(), &mut downloads_increased).await, None); } - #[test] - fn it_should_allow_updating_a_preexisting_peer() { - let mut swarm = Swarm::default(); + #[tokio::test] + async fn it_should_allow_updating_a_preexisting_peer() { + let mut swarm = Swarm::new(&sample_info_hash(), 0, None); let mut downloads_increased = false; let peer = PeerBuilder::default().build(); - swarm.upsert_peer(peer.into(), &mut downloads_increased); + swarm.upsert_peer(peer.into(), &mut downloads_increased).await; - assert_eq!(swarm.upsert_peer(peer.into(), &mut downloads_increased), Some(Arc::new(peer))); + assert_eq!( + swarm.upsert_peer(peer.into(), &mut downloads_increased).await, + Some(Arc::new(peer)) + ); } - #[test] - fn it_should_allow_getting_all_peers() { - let mut swarm = Swarm::default(); + #[tokio::test] + async fn it_should_allow_getting_all_peers() { + let mut swarm = Swarm::new(&sample_info_hash(), 0, None); let mut downloads_increased = false; let peer = PeerBuilder::default().build(); - swarm.upsert_peer(peer.into(), &mut downloads_increased); + swarm.upsert_peer(peer.into(), &mut downloads_increased).await; assert_eq!(swarm.peers(None), [Arc::new(peer)]); } - #[test] - fn it_should_allow_getting_one_peer_by_id() { - let mut swarm = Swarm::default(); + #[tokio::test] + async fn it_should_allow_getting_one_peer_by_id() { + let mut swarm = Swarm::new(&sample_info_hash(), 0, None); let mut downloads_increased = false; let peer = PeerBuilder::default().build(); - swarm.upsert_peer(peer.into(), &mut downloads_increased); + swarm.upsert_peer(peer.into(), &mut downloads_increased).await; assert_eq!(swarm.get(&peer.peer_addr), Some(Arc::new(peer)).as_ref()); } - #[test] - fn it_should_increase_the_number_of_peers_after_inserting_a_new_one() { - let mut swarm = Swarm::default(); + #[tokio::test] + async fn it_should_increase_the_number_of_peers_after_inserting_a_new_one() { + let mut swarm = Swarm::new(&sample_info_hash(), 0, None); let mut downloads_increased = false; let peer = PeerBuilder::default().build(); - swarm.upsert_peer(peer.into(), &mut downloads_increased); + swarm.upsert_peer(peer.into(), &mut downloads_increased).await; assert_eq!(swarm.len(), 1); } - #[test] - fn it_should_decrease_the_number_of_peers_after_removing_one() { - let mut swarm = Swarm::default(); + #[tokio::test] + async fn it_should_decrease_the_number_of_peers_after_removing_one() { + let mut swarm = Swarm::new(&sample_info_hash(), 0, None); let mut downloads_increased = false; let peer = PeerBuilder::default().build(); - swarm.upsert_peer(peer.into(), &mut downloads_increased); + swarm.upsert_peer(peer.into(), &mut downloads_increased).await; - swarm.remove(&peer); + swarm.remove_peer(&peer.peer_addr).await; assert!(swarm.is_empty()); } - #[test] - fn it_should_allow_removing_an_existing_peer() { - let mut swarm = Swarm::default(); + #[tokio::test] + async fn it_should_allow_removing_an_existing_peer() { + let mut swarm = Swarm::new(&sample_info_hash(), 0, None); let mut downloads_increased = false; let peer = PeerBuilder::default().build(); - swarm.upsert_peer(peer.into(), &mut downloads_increased); + swarm.upsert_peer(peer.into(), &mut downloads_increased).await; - let old = swarm.remove(&peer); + let old = swarm.remove_peer(&peer.peer_addr).await; assert_eq!(old, Some(Arc::new(peer))); assert_eq!(swarm.get(&peer.peer_addr), None); } - #[test] - fn it_should_allow_removing_a_non_existing_peer() { - let mut swarm = Swarm::default(); + #[tokio::test] + async fn 
it_should_allow_removing_a_non_existing_peer() { + let mut swarm = Swarm::new(&sample_info_hash(), 0, None); let peer = PeerBuilder::default().build(); - assert_eq!(swarm.remove(&peer), None); + assert_eq!(swarm.remove_peer(&peer.peer_addr).await, None); } - #[test] - fn it_should_allow_getting_all_peers_excluding_peers_with_a_given_address() { - let mut swarm = Swarm::default(); + #[tokio::test] + async fn it_should_allow_getting_all_peers_excluding_peers_with_a_given_address() { + let mut swarm = Swarm::new(&sample_info_hash(), 0, None); let mut downloads_increased = false; let peer1 = PeerBuilder::default() .with_peer_id(&PeerId(*b"-qB00000000000000001")) .with_peer_addr(&SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 6969)) .build(); - swarm.upsert_peer(peer1.into(), &mut downloads_increased); + swarm.upsert_peer(peer1.into(), &mut downloads_increased).await; let peer2 = PeerBuilder::default() .with_peer_id(&PeerId(*b"-qB00000000000000002")) .with_peer_addr(&SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 2)), 6969)) .build(); - swarm.upsert_peer(peer2.into(), &mut downloads_increased); + swarm.upsert_peer(peer2.into(), &mut downloads_increased).await; assert_eq!(swarm.peers_excluding(&peer2.peer_addr, None), [Arc::new(peer1)]); } - #[test] - fn it_should_remove_inactive_peers() { - let mut swarm = Swarm::default(); + #[tokio::test] + async fn it_should_remove_inactive_peers() { + let mut swarm = Swarm::new(&sample_info_hash(), 0, None); let mut downloads_increased = false; let one_second = DurationSinceUnixEpoch::new(1, 0); // Insert the peer let last_update_time = DurationSinceUnixEpoch::new(1_669_397_478_934, 0); let peer = PeerBuilder::default().last_updated_on(last_update_time).build(); - swarm.upsert_peer(peer.into(), &mut downloads_increased); + swarm.upsert_peer(peer.into(), &mut downloads_increased).await; // Remove peers not updated since one second after inserting the peer - swarm.remove_inactive(last_update_time + one_second); + swarm.remove_inactive(last_update_time + one_second).await; assert_eq!(swarm.len(), 0); } - #[test] - fn it_should_not_remove_active_peers() { - let mut swarm = Swarm::default(); + #[tokio::test] + async fn it_should_not_remove_active_peers() { + let mut swarm = Swarm::new(&sample_info_hash(), 0, None); let mut downloads_increased = false; let one_second = DurationSinceUnixEpoch::new(1, 0); // Insert the peer let last_update_time = DurationSinceUnixEpoch::new(1_669_397_478_934, 0); let peer = PeerBuilder::default().last_updated_on(last_update_time).build(); - swarm.upsert_peer(peer.into(), &mut downloads_increased); + swarm.upsert_peer(peer.into(), &mut downloads_increased).await; // Remove peers not updated since one second before inserting the peer. 
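+ // The cutoff is earlier than the peer's last update, so the peer still counts as active.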
- swarm.remove_inactive(last_update_time - one_second); + swarm.remove_inactive(last_update_time - one_second).await; assert_eq!(swarm.len(), 1); } @@ -401,29 +511,30 @@ mod tests { use torrust_tracker_configuration::TrackerPolicy; use torrust_tracker_primitives::peer::fixture::PeerBuilder; + use crate::tests::sample_info_hash; use crate::Swarm; fn empty_swarm() -> Swarm { - Swarm::default() + Swarm::new(&sample_info_hash(), 0, None) } - fn not_empty_swarm() -> Swarm { - let mut swarm = Swarm::default(); - swarm.upsert_peer(PeerBuilder::default().build().into(), &mut false); + async fn not_empty_swarm() -> Swarm { + let mut swarm = Swarm::new(&sample_info_hash(), 0, None); + swarm.upsert_peer(PeerBuilder::default().build().into(), &mut false).await; swarm } - fn not_empty_swarm_with_downloads() -> Swarm { - let mut swarm = Swarm::default(); + async fn not_empty_swarm_with_downloads() -> Swarm { + let mut swarm = Swarm::new(&sample_info_hash(), 0, None); let mut peer = PeerBuilder::leecher().build(); let mut downloads_increased = false; - swarm.upsert_peer(peer.into(), &mut downloads_increased); + swarm.upsert_peer(peer.into(), &mut downloads_increased).await; peer.event = aquatic_udp_protocol::AnnounceEvent::Completed; - swarm.upsert_peer(peer.into(), &mut downloads_increased); + swarm.upsert_peer(peer.into(), &mut downloads_increased).await; assert!(swarm.metadata().downloads() > 0); @@ -457,13 +568,13 @@ mod tests { assert!(empty_swarm().should_be_removed(&remove_peerless_torrents_policy())); } - #[test] - fn it_should_not_be_removed_is_the_swarm_is_not_empty() { - assert!(!not_empty_swarm().should_be_removed(&remove_peerless_torrents_policy())); + #[tokio::test] + async fn it_should_not_be_removed_is_the_swarm_is_not_empty() { + assert!(!not_empty_swarm().await.should_be_removed(&remove_peerless_torrents_policy())); } - #[test] - fn it_should_not_be_removed_even_if_the_swarm_is_empty_if_we_need_to_track_stats_for_downloads_and_there_has_been_downloads( + #[tokio::test] + async fn it_should_not_be_removed_even_if_the_swarm_is_empty_if_we_need_to_track_stats_for_downloads_and_there_has_been_downloads( ) { let policy = TrackerPolicy { remove_peerless_torrents: true, @@ -471,7 +582,7 @@ mod tests { ..Default::default() }; - assert!(!not_empty_swarm_with_downloads().should_be_removed(&policy)); + assert!(!not_empty_swarm_with_downloads().await.should_be_removed(&policy)); } } @@ -486,34 +597,36 @@ mod tests { assert!(!empty_swarm().should_be_removed(&don_not_remove_peerless_torrents_policy())); } - #[test] - fn it_should_not_be_removed_is_the_swarm_is_not_empty() { - assert!(!not_empty_swarm().should_be_removed(&don_not_remove_peerless_torrents_policy())); + #[tokio::test] + async fn it_should_not_be_removed_is_the_swarm_is_not_empty() { + assert!(!not_empty_swarm() + .await + .should_be_removed(&don_not_remove_peerless_torrents_policy())); } } } - #[test] - fn it_should_allow_inserting_two_identical_peers_except_for_the_socket_address() { - let mut swarm = Swarm::default(); + #[tokio::test] + async fn it_should_allow_inserting_two_identical_peers_except_for_the_socket_address() { + let mut swarm = Swarm::new(&sample_info_hash(), 0, None); let mut downloads_increased = false; let peer1 = PeerBuilder::default() .with_peer_addr(&SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 6969)) .build(); - swarm.upsert_peer(peer1.into(), &mut downloads_increased); + swarm.upsert_peer(peer1.into(), &mut downloads_increased).await; let peer2 = PeerBuilder::default() 
.with_peer_addr(&SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 2)), 6969)) .build(); - swarm.upsert_peer(peer2.into(), &mut downloads_increased); + swarm.upsert_peer(peer2.into(), &mut downloads_increased).await; assert_eq!(swarm.len(), 2); } - #[test] - fn it_should_not_allow_inserting_two_peers_with_different_peer_id_but_the_same_socket_address() { - let mut swarm = Swarm::default(); + #[tokio::test] + async fn it_should_not_allow_inserting_two_peers_with_different_peer_id_but_the_same_socket_address() { + let mut swarm = Swarm::new(&sample_info_hash(), 0, None); let mut downloads_increased = false; // When that happens the peer ID will be changed in the swarm. @@ -523,27 +636,27 @@ mod tests { .with_peer_id(&PeerId(*b"-qB00000000000000001")) .with_peer_addr(&SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 6969)) .build(); - swarm.upsert_peer(peer1.into(), &mut downloads_increased); + swarm.upsert_peer(peer1.into(), &mut downloads_increased).await; let peer2 = PeerBuilder::default() .with_peer_id(&PeerId(*b"-qB00000000000000002")) .with_peer_addr(&SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 6969)) .build(); - swarm.upsert_peer(peer2.into(), &mut downloads_increased); + swarm.upsert_peer(peer2.into(), &mut downloads_increased).await; assert_eq!(swarm.len(), 1); } - #[test] - fn it_should_return_the_metadata() { - let mut swarm = Swarm::default(); + #[tokio::test] + async fn it_should_return_the_swarm_metadata() { + let mut swarm = Swarm::new(&sample_info_hash(), 0, None); let mut downloads_increased = false; let seeder = PeerBuilder::seeder().build(); let leecher = PeerBuilder::leecher().build(); - swarm.upsert_peer(seeder.into(), &mut downloads_increased); - swarm.upsert_peer(leecher.into(), &mut downloads_increased); + swarm.upsert_peer(seeder.into(), &mut downloads_increased).await; + swarm.upsert_peer(leecher.into(), &mut downloads_increased).await; assert_eq!( swarm.metadata(), @@ -555,84 +668,91 @@ mod tests { ); } - #[test] - fn it_should_return_the_number_of_seeders_in_the_list() { - let mut swarm = Swarm::default(); + #[tokio::test] + async fn it_should_return_the_number_of_seeders_in_the_list() { + let mut swarm = Swarm::new(&sample_info_hash(), 0, None); let mut downloads_increased = false; let seeder = PeerBuilder::seeder().build(); let leecher = PeerBuilder::leecher().build(); - swarm.upsert_peer(seeder.into(), &mut downloads_increased); - swarm.upsert_peer(leecher.into(), &mut downloads_increased); + swarm.upsert_peer(seeder.into(), &mut downloads_increased).await; + swarm.upsert_peer(leecher.into(), &mut downloads_increased).await; let (seeders, _leechers) = swarm.seeders_and_leechers(); assert_eq!(seeders, 1); } - #[test] - fn it_should_return_the_number_of_leechers_in_the_list() { - let mut swarm = Swarm::default(); + #[tokio::test] + async fn it_should_return_the_number_of_leechers_in_the_list() { + let mut swarm = Swarm::new(&sample_info_hash(), 0, None); let mut downloads_increased = false; let seeder = PeerBuilder::seeder().build(); let leecher = PeerBuilder::leecher().build(); - swarm.upsert_peer(seeder.into(), &mut downloads_increased); - swarm.upsert_peer(leecher.into(), &mut downloads_increased); + swarm.upsert_peer(seeder.into(), &mut downloads_increased).await; + swarm.upsert_peer(leecher.into(), &mut downloads_increased).await; let (_seeders, leechers) = swarm.seeders_and_leechers(); assert_eq!(leechers, 1); } + #[tokio::test] + async fn it_should_be_a_peerless_swarm_when_it_does_not_contain_any_peers() { + let swarm = 
Swarm::new(&sample_info_hash(), 0, None); + assert!(swarm.is_peerless()); + } + mod updating_the_swarm_metadata { mod when_a_new_peer_is_added { use torrust_tracker_primitives::peer::fixture::PeerBuilder; use crate::swarm::Swarm; + use crate::tests::sample_info_hash; - #[test] - fn it_should_increase_the_number_of_leechers_if_the_new_peer_is_a_leecher_() { - let mut swarm = Swarm::default(); + #[tokio::test] + async fn it_should_increase_the_number_of_leechers_if_the_new_peer_is_a_leecher_() { + let mut swarm = Swarm::new(&sample_info_hash(), 0, None); let mut downloads_increased = false; let leechers = swarm.metadata().leechers(); let leecher = PeerBuilder::leecher().build(); - swarm.upsert_peer(leecher.into(), &mut downloads_increased); + swarm.upsert_peer(leecher.into(), &mut downloads_increased).await; assert_eq!(swarm.metadata().leechers(), leechers + 1); } - #[test] - fn it_should_increase_the_number_of_seeders_if_the_new_peer_is_a_seeder() { - let mut swarm = Swarm::default(); + #[tokio::test] + async fn it_should_increase_the_number_of_seeders_if_the_new_peer_is_a_seeder() { + let mut swarm = Swarm::new(&sample_info_hash(), 0, None); let mut downloads_increased = false; let seeders = swarm.metadata().seeders(); let seeder = PeerBuilder::seeder().build(); - swarm.upsert_peer(seeder.into(), &mut downloads_increased); + swarm.upsert_peer(seeder.into(), &mut downloads_increased).await; assert_eq!(swarm.metadata().seeders(), seeders + 1); } - #[test] - fn it_should_not_increasing_the_number_of_downloads_if_the_new_peer_has_completed_downloading_as_it_was_not_previously_known( + #[tokio::test] + async fn it_should_not_increasing_the_number_of_downloads_if_the_new_peer_has_completed_downloading_as_it_was_not_previously_known( ) { - let mut swarm = Swarm::default(); + let mut swarm = Swarm::new(&sample_info_hash(), 0, None); let mut downloads_increased = false; let downloads = swarm.metadata().downloads(); let seeder = PeerBuilder::seeder().build(); - swarm.upsert_peer(seeder.into(), &mut downloads_increased); + swarm.upsert_peer(seeder.into(), &mut downloads_increased).await; assert_eq!(swarm.metadata().downloads(), downloads); } @@ -642,35 +762,36 @@ mod tests { use torrust_tracker_primitives::peer::fixture::PeerBuilder; use crate::swarm::Swarm; + use crate::tests::sample_info_hash; - #[test] - fn it_should_decrease_the_number_of_leechers_if_the_removed_peer_was_a_leecher() { - let mut swarm = Swarm::default(); + #[tokio::test] + async fn it_should_decrease_the_number_of_leechers_if_the_removed_peer_was_a_leecher() { + let mut swarm = Swarm::new(&sample_info_hash(), 0, None); let mut downloads_increased = false; let leecher = PeerBuilder::leecher().build(); - swarm.upsert_peer(leecher.into(), &mut downloads_increased); + swarm.upsert_peer(leecher.into(), &mut downloads_increased).await; let leechers = swarm.metadata().leechers(); - swarm.remove(&leecher); + swarm.remove_peer(&leecher.peer_addr).await; assert_eq!(swarm.metadata().leechers(), leechers - 1); } - #[test] - fn it_should_decrease_the_number_of_seeders_if_the_removed_peer_was_a_seeder() { - let mut swarm = Swarm::default(); + #[tokio::test] + async fn it_should_decrease_the_number_of_seeders_if_the_removed_peer_was_a_seeder() { + let mut swarm = Swarm::new(&sample_info_hash(), 0, None); let mut downloads_increased = false; let seeder = PeerBuilder::seeder().build(); - swarm.upsert_peer(seeder.into(), &mut downloads_increased); + swarm.upsert_peer(seeder.into(), &mut downloads_increased).await; let seeders = 
swarm.metadata().seeders(); - swarm.remove(&seeder); + swarm.remove_peer(&seeder.peer_addr).await; assert_eq!(swarm.metadata().seeders(), seeders - 1); } @@ -682,35 +803,36 @@ mod tests { use torrust_tracker_primitives::peer::fixture::PeerBuilder; use crate::swarm::Swarm; + use crate::tests::sample_info_hash; - #[test] - fn it_should_decrease_the_number_of_leechers_when_a_removed_peer_is_a_leecher() { - let mut swarm = Swarm::default(); + #[tokio::test] + async fn it_should_decrease_the_number_of_leechers_when_a_removed_peer_is_a_leecher() { + let mut swarm = Swarm::new(&sample_info_hash(), 0, None); let mut downloads_increased = false; let leecher = PeerBuilder::leecher().build(); - swarm.upsert_peer(leecher.into(), &mut downloads_increased); + swarm.upsert_peer(leecher.into(), &mut downloads_increased).await; let leechers = swarm.metadata().leechers(); - swarm.remove_inactive(leecher.updated + Duration::from_secs(1)); + swarm.remove_inactive(leecher.updated + Duration::from_secs(1)).await; assert_eq!(swarm.metadata().leechers(), leechers - 1); } - #[test] - fn it_should_decrease_the_number_of_seeders_when_the_removed_peer_is_a_seeder() { - let mut swarm = Swarm::default(); + #[tokio::test] + async fn it_should_decrease_the_number_of_seeders_when_the_removed_peer_is_a_seeder() { + let mut swarm = Swarm::new(&sample_info_hash(), 0, None); let mut downloads_increased = false; let seeder = PeerBuilder::seeder().build(); - swarm.upsert_peer(seeder.into(), &mut downloads_increased); + swarm.upsert_peer(seeder.into(), &mut downloads_increased).await; let seeders = swarm.metadata().seeders(); - swarm.remove_inactive(seeder.updated + Duration::from_secs(1)); + swarm.remove_inactive(seeder.updated + Duration::from_secs(1)).await; assert_eq!(swarm.metadata().seeders(), seeders - 1); } @@ -721,84 +843,225 @@ mod tests { use torrust_tracker_primitives::peer::fixture::PeerBuilder; use crate::swarm::Swarm; + use crate::tests::sample_info_hash; - #[test] - fn it_should_increase_seeders_and_decreasing_leechers_when_the_peer_changes_from_leecher_to_seeder_() { - let mut swarm = Swarm::default(); + #[tokio::test] + async fn it_should_increase_seeders_and_decreasing_leechers_when_the_peer_changes_from_leecher_to_seeder_() { + let mut swarm = Swarm::new(&sample_info_hash(), 0, None); let mut downloads_increased = false; let mut peer = PeerBuilder::leecher().build(); - swarm.upsert_peer(peer.into(), &mut downloads_increased); + swarm.upsert_peer(peer.into(), &mut downloads_increased).await; let leechers = swarm.metadata().leechers(); let seeders = swarm.metadata().seeders(); peer.left = NumberOfBytes::new(0); // Convert to seeder - swarm.upsert_peer(peer.into(), &mut downloads_increased); + swarm.upsert_peer(peer.into(), &mut downloads_increased).await; assert_eq!(swarm.metadata().seeders(), seeders + 1); assert_eq!(swarm.metadata().leechers(), leechers - 1); } - #[test] - fn it_should_increase_leechers_and_decreasing_seeders_when_the_peer_changes_from_seeder_to_leecher() { - let mut swarm = Swarm::default(); + #[tokio::test] + async fn it_should_increase_leechers_and_decreasing_seeders_when_the_peer_changes_from_seeder_to_leecher() { + let mut swarm = Swarm::new(&sample_info_hash(), 0, None); let mut downloads_increased = false; let mut peer = PeerBuilder::seeder().build(); - swarm.upsert_peer(peer.into(), &mut downloads_increased); + swarm.upsert_peer(peer.into(), &mut downloads_increased).await; let leechers = swarm.metadata().leechers(); let seeders = swarm.metadata().seeders(); peer.left = 
NumberOfBytes::new(10); // Convert to leecher - swarm.upsert_peer(peer.into(), &mut downloads_increased); + swarm.upsert_peer(peer.into(), &mut downloads_increased).await; assert_eq!(swarm.metadata().leechers(), leechers + 1); assert_eq!(swarm.metadata().seeders(), seeders - 1); } - #[test] - fn it_should_increase_the_number_of_downloads_when_the_peer_announces_completed_downloading() { - let mut swarm = Swarm::default(); + #[tokio::test] + async fn it_should_increase_the_number_of_downloads_when_the_peer_announces_completed_downloading() { + let mut swarm = Swarm::new(&sample_info_hash(), 0, None); let mut downloads_increased = false; let mut peer = PeerBuilder::leecher().build(); - swarm.upsert_peer(peer.into(), &mut downloads_increased); + swarm.upsert_peer(peer.into(), &mut downloads_increased).await; let downloads = swarm.metadata().downloads(); peer.event = aquatic_udp_protocol::AnnounceEvent::Completed; - swarm.upsert_peer(peer.into(), &mut downloads_increased); + swarm.upsert_peer(peer.into(), &mut downloads_increased).await; assert_eq!(swarm.metadata().downloads(), downloads + 1); } - #[test] - fn it_should_not_increasing_the_number_of_downloads_when_the_peer_announces_completed_downloading_twice_() { - let mut swarm = Swarm::default(); + #[tokio::test] + async fn it_should_not_increasing_the_number_of_downloads_when_the_peer_announces_completed_downloading_twice_() { + let mut swarm = Swarm::new(&sample_info_hash(), 0, None); let mut downloads_increased = false; let mut peer = PeerBuilder::leecher().build(); - swarm.upsert_peer(peer.into(), &mut downloads_increased); + swarm.upsert_peer(peer.into(), &mut downloads_increased).await; let downloads = swarm.metadata().downloads(); peer.event = aquatic_udp_protocol::AnnounceEvent::Completed; - swarm.upsert_peer(peer.into(), &mut downloads_increased); + swarm.upsert_peer(peer.into(), &mut downloads_increased).await; - swarm.upsert_peer(peer.into(), &mut downloads_increased); + swarm.upsert_peer(peer.into(), &mut downloads_increased).await; assert_eq!(swarm.metadata().downloads(), downloads + 1); } } } + + mod triggering_events { + + use std::sync::Arc; + + use aquatic_udp_protocol::AnnounceEvent::Started; + use torrust_tracker_primitives::peer::fixture::PeerBuilder; + use torrust_tracker_primitives::DurationSinceUnixEpoch; + + use crate::event::sender::tests::{expect_event_sequence, MockEventSender}; + use crate::event::Event; + use crate::swarm::Swarm; + use crate::tests::sample_info_hash; + + #[tokio::test] + async fn it_should_trigger_an_event_when_a_new_peer_is_added() { + let info_hash = sample_info_hash(); + let peer = PeerBuilder::leecher().build(); + + let mut event_sender_mock = MockEventSender::new(); + + expect_event_sequence(&mut event_sender_mock, vec![Event::PeerAdded { info_hash, peer }]); + + let mut swarm = Swarm::new(&sample_info_hash(), 0, Some(Arc::new(event_sender_mock))); + + let mut downloads_increased = false; + swarm.upsert_peer(peer.into(), &mut downloads_increased).await; + } + + #[tokio::test] + async fn it_should_trigger_an_event_when_a_peer_is_directly_removed() { + let info_hash = sample_info_hash(); + let peer = PeerBuilder::leecher().build(); + + let mut event_sender_mock = MockEventSender::new(); + + expect_event_sequence( + &mut event_sender_mock, + vec![Event::PeerAdded { info_hash, peer }, Event::PeerRemoved { info_hash, peer }], + ); + + let mut swarm = Swarm::new(&info_hash, 0, Some(Arc::new(event_sender_mock))); + + // Insert the peer + let mut downloads_increased = false; + 
swarm.upsert_peer(peer.into(), &mut downloads_increased).await; + + swarm.remove_peer(&peer.peer_addr).await; + } + + #[tokio::test] + async fn it_should_trigger_an_event_when_a_peer_is_removed_due_to_inactivity() { + let info_hash = sample_info_hash(); + let peer = PeerBuilder::leecher().build(); + + let mut event_sender_mock = MockEventSender::new(); + + expect_event_sequence( + &mut event_sender_mock, + vec![Event::PeerAdded { info_hash, peer }, Event::PeerRemoved { info_hash, peer }], + ); + + let mut swarm = Swarm::new(&info_hash, 0, Some(Arc::new(event_sender_mock))); + + // Insert the peer + let mut downloads_increased = false; + swarm.upsert_peer(peer.into(), &mut downloads_increased).await; + + // Peers not updated after this time will be removed + let current_cutoff = peer.updated + DurationSinceUnixEpoch::from_secs(1); + + swarm.remove_inactive(current_cutoff).await; + } + + #[tokio::test] + async fn it_should_trigger_an_event_when_a_peer_is_updated() { + let info_hash = sample_info_hash(); + let peer = PeerBuilder::leecher().with_event(Started).build(); + + let mut event_sender_mock = MockEventSender::new(); + + expect_event_sequence( + &mut event_sender_mock, + vec![ + Event::PeerAdded { info_hash, peer }, + Event::PeerUpdated { + info_hash, + old_peer: peer, + new_peer: peer, + }, + ], + ); + + let mut swarm = Swarm::new(&info_hash, 0, Some(Arc::new(event_sender_mock))); + + // Insert the peer + let mut downloads_increased = false; + swarm.upsert_peer(peer.into(), &mut downloads_increased).await; + + // Update the peer + swarm.upsert_peer(peer.into(), &mut downloads_increased).await; + } + + #[tokio::test] + async fn it_should_trigger_an_event_when_a_peer_completes_a_download() { + let info_hash = sample_info_hash(); + let started_peer = PeerBuilder::leecher().with_event(Started).build(); + let completed_peer = started_peer.into_completed(); + + let mut event_sender_mock = MockEventSender::new(); + + expect_event_sequence( + &mut event_sender_mock, + vec![ + Event::PeerAdded { + info_hash, + peer: started_peer, + }, + Event::PeerUpdated { + info_hash, + old_peer: started_peer, + new_peer: completed_peer, + }, + Event::PeerDownloadCompleted { + info_hash, + peer: completed_peer, + }, + ], + ); + + let mut swarm = Swarm::new(&info_hash, 0, Some(Arc::new(event_sender_mock))); + + // Insert the peer + let mut downloads_increased = false; + swarm.upsert_peer(started_peer.into(), &mut downloads_increased).await; + + // Announce as completed + swarm.upsert_peer(completed_peer.into(), &mut downloads_increased).await; + } + } } diff --git a/packages/torrent-repository/src/swarms.rs b/packages/torrent-repository/src/swarms.rs index a140663c9..8b8327778 100644 --- a/packages/torrent-repository/src/swarms.rs +++ b/packages/torrent-repository/src/swarms.rs @@ -1,22 +1,34 @@ -use std::sync::{Arc, Mutex}; +use std::sync::Arc; use bittorrent_primitives::info_hash::InfoHash; use crossbeam_skiplist::SkipMap; +use tokio::sync::Mutex; use torrust_tracker_clock::conv::convert_from_timestamp_to_datetime_utc; use torrust_tracker_configuration::TrackerPolicy; use torrust_tracker_primitives::pagination::Pagination; use torrust_tracker_primitives::swarm_metadata::{AggregateSwarmMetadata, SwarmMetadata}; use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, PersistentTorrent, PersistentTorrents}; +use crate::event::sender::Sender; +use crate::event::Event; use crate::swarm::Swarm; use crate::SwarmHandle; -#[derive(Default, Debug)] +#[derive(Default)] pub struct Swarms { swarms: SkipMap<InfoHash, SwarmHandle>, + 
event_sender: Sender, } impl Swarms { + #[must_use] + pub fn new(event_sender: Sender) -> Self { + Self { + swarms: SkipMap::new(), + event_sender, + } + } + /// Upsert a peer into the swarm of a torrent. /// /// Optionally, it can also preset the number of downloads of the torrent @@ -37,34 +49,57 @@ /// # Errors /// /// This function panics if the lock for the swarm handle cannot be acquired. - pub fn handle_announcement( + #[allow(clippy::await_holding_lock)] + pub async fn handle_announcement( &self, info_hash: &InfoHash, peer: &peer::Peer, opt_persistent_torrent: Option<PersistentTorrent>, ) -> Result<bool, Error> { - let swarm_handle = if let Some(number_of_downloads) = opt_persistent_torrent { - SwarmHandle::new(Swarm::new(number_of_downloads).into()) - } else { - SwarmHandle::default() + let swarm_handle = match self.swarms.get(info_hash) { + None => { + let number_of_downloads = opt_persistent_torrent.unwrap_or_default(); + + let new_swarm_handle = + SwarmHandle::new(Swarm::new(info_hash, number_of_downloads, self.event_sender.clone()).into()); + + let new_swarm_handle = self.swarms.get_or_insert(*info_hash, new_swarm_handle); + + if let Some(event_sender) = self.event_sender.as_deref() { + event_sender + .send(Event::TorrentAdded { + info_hash: *info_hash, + announcement: *peer, + }) + .await; + } + + new_swarm_handle + } + Some(existing_swarm_handle) => existing_swarm_handle, }; - let swarm_handle = self.swarms.get_or_insert(*info_hash, swarm_handle); + let mut swarm = swarm_handle.value().lock().await; - let mut swarm = swarm_handle.value().lock()?; + let downloads_increased = swarm.handle_announcement(peer).await; - Ok(swarm.handle_announcement(peer)) + Ok(downloads_increased) } - /// Inserts a new swarm. + /// Inserts a new swarm. Only used for testing purposes. pub fn insert(&self, info_hash: &InfoHash, swarm: Swarm) { - // code-review: swarms builder? + // code-review: swarms builder? or constructor from vec? // It's only used for testing purposes. It allows to pre-define the // initial state of the swarm without having to go through the upsert // process. let swarm_handle = Arc::new(Mutex::new(swarm)); + self.swarms.insert(*info_hash, swarm_handle); + + // IMPORTANT: Notice this does not send an event because it is used only + // for testing purposes. The event is sent only when the torrent is + // announced for the first time. } /// Removes a torrent entry from the repository. @@ -73,8 +108,14 @@ /// /// An `Option` containing the removed torrent entry if it existed. #[must_use] - pub fn remove(&self, key: &InfoHash) -> Option<SwarmHandle> { - self.swarms.remove(key).map(|entry| entry.value().clone()) + pub async fn remove(&self, key: &InfoHash) -> Option<SwarmHandle> { + let swarm_handle = self.swarms.remove(key).map(|entry| entry.value().clone()); + + if let Some(event_sender) = self.event_sender.as_deref() { + event_sender.send(Event::TorrentRemoved { info_hash: *key }).await; + } + + swarm_handle } /// Retrieves a tracked torrent handle by its infohash. @@ -124,11 +165,11 @@ /// # Errors /// /// This function panics if the lock for the swarm handle cannot be acquired. 
- pub fn get_swarm_metadata(&self, info_hash: &InfoHash) -> Result<Option<SwarmMetadata>, Error> { + pub async fn get_swarm_metadata(&self, info_hash: &InfoHash) -> Result<Option<SwarmMetadata>, Error> { match self.swarms.get(info_hash) { None => Ok(None), Some(swarm_handle) => { - let swarm = swarm_handle.value().lock()?; + let swarm = swarm_handle.value().lock().await; Ok(Some(swarm.metadata())) } } @@ -145,8 +186,8 @@ /// /// This function returns an error if it fails to acquire the lock for the /// swarm handle. - pub fn get_swarm_metadata_or_default(&self, info_hash: &InfoHash) -> Result<SwarmMetadata, Error> { - match self.get_swarm_metadata(info_hash) { + pub async fn get_swarm_metadata_or_default(&self, info_hash: &InfoHash) -> Result<SwarmMetadata, Error> { + match self.get_swarm_metadata(info_hash).await { Ok(Some(swarm_metadata)) => Ok(swarm_metadata), Ok(None) => Ok(SwarmMetadata::zeroed()), Err(err) => Err(err), @@ -169,7 +210,7 @@ /// /// This function returns an error if it fails to acquire the lock for the /// swarm handle. - pub fn get_peers_peers_excluding( + pub async fn get_peers_peers_excluding( &self, info_hash: &InfoHash, peer: &peer::Peer, @@ -178,7 +219,7 @@ match self.get(info_hash) { None => Ok(vec![]), Some(swarm_handle) => { - let swarm = swarm_handle.lock()?; + let swarm = swarm_handle.lock().await; Ok(swarm.peers_excluding(&peer.peer_addr, Some(limit))) } } @@ -198,11 +239,11 @@ /// /// This function returns an error if it fails to acquire the lock for the /// swarm handle. - pub fn get_swarm_peers(&self, info_hash: &InfoHash, limit: usize) -> Result<Vec<Arc<Peer>>, Error> { + pub async fn get_swarm_peers(&self, info_hash: &InfoHash, limit: usize) -> Result<Vec<Arc<Peer>>, Error> { match self.get(info_hash) { None => Ok(vec![]), Some(swarm_handle) => { - let swarm = swarm_handle.lock()?; + let swarm = swarm_handle.lock().await; Ok(swarm.peers(Some(limit))) } } @@ -217,7 +258,7 @@ /// /// This function returns an error if it fails to acquire the lock for any /// swarm handle. - pub fn remove_inactive_peers(&self, current_cutoff: DurationSinceUnixEpoch) -> Result<u64, Error> { + pub async fn remove_inactive_peers(&self, current_cutoff: DurationSinceUnixEpoch) -> Result<usize, Error> { tracing::info!( "Removing inactive peers since: {:?} ...", convert_from_timestamp_to_datetime_utc(current_cutoff) ); let mut inactive_peers_removed = 0; for swarm_handle in &self.swarms { - let mut swarm = swarm_handle.value().lock()?; - let removed = swarm.remove_inactive(current_cutoff); + let mut swarm = swarm_handle.value().lock().await; + let removed = swarm.remove_inactive(current_cutoff).await; inactive_peers_removed += removed; } @@ -245,21 +286,27 @@ /// /// This function returns an error if it fails to acquire the lock for any /// swarm handle. 
- pub fn remove_peerless_torrents(&self, policy: &TrackerPolicy) -> Result<usize, Error> { + pub async fn remove_peerless_torrents(&self, policy: &TrackerPolicy) -> Result<usize, Error> { tracing::info!("Removing peerless torrents ..."); let mut peerless_torrents_removed = 0; for swarm_handle in &self.swarms { - let swarm = swarm_handle.value().lock()?; + let swarm = swarm_handle.value().lock().await; if swarm.meets_retaining_policy(policy) { continue; } + let info_hash = *swarm_handle.key(); + swarm_handle.remove(); peerless_torrents_removed += 1; + + if let Some(event_sender) = self.event_sender.as_deref() { + event_sender.send(Event::TorrentRemoved { info_hash }).await; + } } tracing::info!(peerless_torrents_removed = peerless_torrents_removed); @@ -282,7 +329,7 @@ continue; } - let entry = SwarmHandle::new(Swarm::new(*completed).into()); + let entry = SwarmHandle::new(Swarm::new(info_hash, *completed, self.event_sender.clone()).into()); // Since SkipMap is lock-free the torrent could have been inserted // after checking if it exists. @@ -310,11 +357,11 @@ /// /// This function returns an error if it fails to acquire the lock for any /// swarm handle. - pub fn get_aggregate_swarm_metadata(&self) -> Result<AggregateSwarmMetadata, Error> { + pub async fn get_aggregate_swarm_metadata(&self) -> Result<AggregateSwarmMetadata, Error> { let mut metrics = AggregateSwarmMetadata::default(); for swarm_handle in &self.swarms { - let swarm = swarm_handle.value().lock()?; + let swarm = swarm_handle.value().lock().await; let stats = swarm.metadata(); @@ -338,11 +385,11 @@ /// /// This function returns an error if it fails to acquire the lock for any /// swarm handle. - pub fn count_peerless_torrents(&self) -> Result<usize, Error> { + pub async fn count_peerless_torrents(&self) -> Result<usize, Error> { let mut peerless_torrents = 0; for swarm_handle in &self.swarms { - let swarm = swarm_handle.value().lock()?; + let swarm = swarm_handle.value().lock().await; if swarm.is_peerless() { peerless_torrents += 1; @@ -362,11 +409,11 @@ /// /// This function returns an error if it fails to acquire the lock for any /// swarm handle. - pub fn count_peers(&self) -> Result<usize, Error> { + pub async fn count_peers(&self) -> Result<usize, Error> { let mut peers = 0; for swarm_handle in &self.swarms { - let swarm = swarm_handle.value().lock()?; + let swarm = swarm_handle.value().lock().await; peers += swarm.len(); } @@ -386,24 +433,20 @@ } #[derive(thiserror::Error, Debug, Clone)] -pub enum Error { - #[error("Can't acquire swarm lock")] - CannotAcquireSwarmLock, -} - -impl From<std::sync::PoisonError<std::sync::MutexGuard<'_, Swarm>>> for Error { - fn from(_error: std::sync::PoisonError<std::sync::MutexGuard<'_, Swarm>>) -> Self { - Error::CannotAcquireSwarmLock - } -} +pub enum Error {} #[cfg(test)] mod tests { - mod the_in_memory_torrent_repository { + mod the_swarm_repository { + + use std::sync::Arc; use aquatic_udp_protocol::PeerId; + use crate::swarms::Swarms; + use crate::tests::{sample_info_hash, sample_peer}; + /// It generates a peer id from a number where the number is the last /// part of the peer ID. For example, for `12` it returns /// `-qB00000000000000012`. @@ -424,14 +467,50 @@ // The `TorrentRepository` has these responsibilities: // - To maintain the peer lists for each torrent. - // - To maintain the the torrent entries, which contains all the info about the - // torrents, including the peer lists. - // - To return the torrent entries. + // - To maintain the torrent entries, which contain all the info + // about the torrents, including the peer lists. + // - To return the torrent entries (swarm handles). 
// - To return the peer lists for a given torrent. // - To return the torrent metrics. // - To return the swarm metadata for a given torrent. // - To handle the persistence of the torrent entries. + #[tokio::test] + async fn it_should_return_zero_length_when_it_has_no_swarms() { + let swarms = Arc::new(Swarms::default()); + assert_eq!(swarms.len(), 0); + } + + #[tokio::test] + async fn it_should_return_the_length_when_it_has_swarms() { + let swarms = Arc::new(Swarms::default()); + let info_hash = sample_info_hash(); + let peer = sample_peer(); + swarms.handle_announcement(&info_hash, &peer, None).await.unwrap(); + assert_eq!(swarms.len(), 1); + } + + #[tokio::test] + async fn it_should_be_empty_when_it_has_no_swarms() { + let swarms = Arc::new(Swarms::default()); + assert!(swarms.is_empty()); + + let info_hash = sample_info_hash(); + let peer = sample_peer(); + swarms.handle_announcement(&info_hash, &peer, None).await.unwrap(); + assert!(!swarms.is_empty()); + } + + #[tokio::test] + async fn it_should_not_be_empty_when_it_has_at_least_one_swarm() { + let swarms = Arc::new(Swarms::default()); + let info_hash = sample_info_hash(); + let peer = sample_peer(); + swarms.handle_announcement(&info_hash, &peer, None).await.unwrap(); + + assert!(!swarms.is_empty()); + } + mod maintaining_the_peer_lists { use std::sync::Arc; @@ -445,7 +524,7 @@ mod tests { let info_hash = sample_info_hash(); - let _number_of_downloads_increased = swarms.handle_announcement(&info_hash, &sample_peer(), None); + swarms.handle_announcement(&info_hash, &sample_peer(), None).await.unwrap(); assert!(swarms.get(&info_hash).is_some()); } @@ -456,8 +535,8 @@ mod tests { let info_hash = sample_info_hash(); - let _number_of_downloads_increased = swarms.handle_announcement(&info_hash, &sample_peer(), None); - let _number_of_downloads_increased = swarms.handle_announcement(&info_hash, &sample_peer(), None); + swarms.handle_announcement(&info_hash, &sample_peer(), None).await.unwrap(); + swarms.handle_announcement(&info_hash, &sample_peer(), None).await.unwrap(); assert!(swarms.get(&info_hash).is_some()); } @@ -472,7 +551,7 @@ mod tests { use torrust_tracker_primitives::peer::Peer; use torrust_tracker_primitives::DurationSinceUnixEpoch; - use crate::swarms::tests::the_in_memory_torrent_repository::numeric_peer_id; + use crate::swarms::tests::the_swarm_repository::numeric_peer_id; use crate::swarms::Swarms; use crate::tests::{sample_info_hash, sample_peer}; @@ -483,9 +562,9 @@ mod tests { let info_hash = sample_info_hash(); let peer = sample_peer(); - let _number_of_downloads_increased = swarms.handle_announcement(&info_hash, &peer, None); + swarms.handle_announcement(&info_hash, &peer, None).await.unwrap(); - let peers = swarms.get_swarm_peers(&info_hash, 74).unwrap(); + let peers = swarms.get_swarm_peers(&info_hash, 74).await.unwrap(); assert_eq!(peers, vec![Arc::new(peer)]); } @@ -494,7 +573,7 @@ mod tests { async fn it_should_return_an_empty_list_or_peers_for_a_non_existing_torrent() { let swarms = Arc::new(Swarms::default()); - let peers = swarms.get_swarm_peers(&sample_info_hash(), 74).unwrap(); + let peers = swarms.get_swarm_peers(&sample_info_hash(), 74).await.unwrap(); assert!(peers.is_empty()); } @@ -516,10 +595,10 @@ mod tests { event: AnnounceEvent::Completed, }; - let _number_of_downloads_increased = swarms.handle_announcement(&info_hash, &peer, None); + swarms.handle_announcement(&info_hash, &peer, None).await.unwrap(); } - let peers = swarms.get_swarm_peers(&info_hash, 74).unwrap(); + let peers = 
swarms.get_swarm_peers(&info_hash, 74).await.unwrap(); assert_eq!(peers.len(), 74); } @@ -534,7 +613,7 @@ mod tests { use torrust_tracker_primitives::peer::Peer; use torrust_tracker_primitives::DurationSinceUnixEpoch; - use crate::swarms::tests::the_in_memory_torrent_repository::numeric_peer_id; + use crate::swarms::tests::the_swarm_repository::numeric_peer_id; use crate::swarms::Swarms; use crate::tests::{sample_info_hash, sample_peer}; @@ -544,6 +623,7 @@ mod tests { let peers = swarms .get_peers_peers_excluding(&sample_info_hash(), &sample_peer(), TORRENT_PEERS_LIMIT) + .await .unwrap(); assert_eq!(peers, vec![]); @@ -556,10 +636,11 @@ mod tests { let info_hash = sample_info_hash(); let peer = sample_peer(); - let _number_of_downloads_increased = swarms.handle_announcement(&info_hash, &peer, None); + swarms.handle_announcement(&info_hash, &peer, None).await.unwrap(); let peers = swarms .get_peers_peers_excluding(&info_hash, &peer, TORRENT_PEERS_LIMIT) + .await .unwrap(); assert_eq!(peers, vec![]); @@ -573,7 +654,7 @@ mod tests { let excluded_peer = sample_peer(); - let _number_of_downloads_increased = swarms.handle_announcement(&info_hash, &excluded_peer, None); + swarms.handle_announcement(&info_hash, &excluded_peer, None).await.unwrap(); // Add 74 peers for idx in 2..=75 { @@ -587,11 +668,12 @@ mod tests { event: AnnounceEvent::Completed, }; - let _number_of_downloads_increased = swarms.handle_announcement(&info_hash, &peer, None); + swarms.handle_announcement(&info_hash, &peer, None).await.unwrap(); } let peers = swarms .get_peers_peers_excluding(&info_hash, &excluded_peer, TORRENT_PEERS_LIMIT) + .await .unwrap(); assert_eq!(peers.len(), 74); @@ -617,9 +699,9 @@ mod tests { let swarms = Arc::new(Swarms::default()); let info_hash = sample_info_hash(); - let _number_of_downloads_increased = swarms.handle_announcement(&info_hash, &sample_peer(), None); + swarms.handle_announcement(&info_hash, &sample_peer(), None).await.unwrap(); - let _unused = swarms.remove(&info_hash); + let _unused = swarms.remove(&info_hash).await; assert!(swarms.get(&info_hash).is_none()); } @@ -632,27 +714,33 @@ mod tests { let mut peer = sample_peer(); peer.updated = DurationSinceUnixEpoch::new(0, 0); - let _number_of_downloads_increased = swarms.handle_announcement(&info_hash, &peer, None); + swarms.handle_announcement(&info_hash, &peer, None).await.unwrap(); // Cut off time is 1 second after the peer was updated swarms .remove_inactive_peers(peer.updated.add(Duration::from_secs(1))) + .await .unwrap(); - assert!(!swarms.get_swarm_peers(&info_hash, 74).unwrap().contains(&Arc::new(peer))); + assert!(!swarms + .get_swarm_peers(&info_hash, 74) + .await + .unwrap() + .contains(&Arc::new(peer))); } - fn initialize_repository_with_one_torrent_without_peers(info_hash: &InfoHash) -> Arc { + async fn initialize_repository_with_one_torrent_without_peers(info_hash: &InfoHash) -> Arc { let swarms = Arc::new(Swarms::default()); // Insert a sample peer for the torrent to force adding the torrent entry let mut peer = sample_peer(); peer.updated = DurationSinceUnixEpoch::new(0, 0); - let _number_of_downloads_increased = swarms.handle_announcement(info_hash, &peer, None); + swarms.handle_announcement(info_hash, &peer, None).await.unwrap(); // Remove the peer swarms .remove_inactive_peers(peer.updated.add(Duration::from_secs(1))) + .await .unwrap(); swarms @@ -662,14 +750,14 @@ mod tests { async fn it_should_remove_torrents_without_peers() { let info_hash = sample_info_hash(); - let swarms = 
initialize_repository_with_one_torrent_without_peers(&info_hash); + let swarms = initialize_repository_with_one_torrent_without_peers(&info_hash).await; let tracker_policy = TrackerPolicy { remove_peerless_torrents: true, ..Default::default() }; - swarms.remove_peerless_torrents(&tracker_policy).unwrap(); + swarms.remove_peerless_torrents(&tracker_policy).await.unwrap(); assert!(swarms.get(&info_hash).is_none()); } @@ -683,7 +771,7 @@ mod tests { use crate::swarms::Swarms; use crate::tests::{sample_info_hash, sample_peer}; - use crate::{LockTrackedTorrent, SwarmHandle}; + use crate::{Swarm, SwarmHandle}; /// `TorrentEntry` data is not directly accessible. It's only /// accessible through the trait methods. We need this temporary @@ -695,19 +783,19 @@ mod tests { number_of_peers: usize, } + async fn torrent_entry_info(swarm_handle: SwarmHandle) -> TorrentEntryInfo { + let torrent_guard = swarm_handle.lock().await; + torrent_guard.clone().into() + } + #[allow(clippy::from_over_into)] - impl Into for SwarmHandle { + impl Into for Swarm { fn into(self) -> TorrentEntryInfo { - let torrent_guard = self.lock_or_panic(); - let torrent_entry_info = TorrentEntryInfo { - swarm_metadata: torrent_guard.metadata(), - peers: torrent_guard.peers(None).iter().map(|peer| *peer.clone()).collect(), - number_of_peers: torrent_guard.len(), + swarm_metadata: self.metadata(), + peers: self.peers(None).iter().map(|peer| *peer.clone()).collect(), + number_of_peers: self.len(), }; - - drop(torrent_guard); - torrent_entry_info } } @@ -719,9 +807,9 @@ mod tests { let info_hash = sample_info_hash(); let peer = sample_peer(); - let _number_of_downloads_increased = swarms.handle_announcement(&info_hash, &peer, None); + swarms.handle_announcement(&info_hash, &peer, None).await.unwrap(); - let torrent_entry = swarms.get(&info_hash).unwrap(); + let torrent_entry_info = torrent_entry_info(swarms.get(&info_hash).unwrap()).await; assert_eq!( TorrentEntryInfo { @@ -733,7 +821,7 @@ mod tests { peers: vec!(peer), number_of_peers: 1 }, - torrent_entry.into() + torrent_entry_info ); } @@ -742,7 +830,9 @@ mod tests { use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; - use crate::swarms::tests::the_in_memory_torrent_repository::returning_torrent_entries::TorrentEntryInfo; + use crate::swarms::tests::the_swarm_repository::returning_torrent_entries::{ + torrent_entry_info, TorrentEntryInfo, + }; use crate::swarms::Swarms; use crate::tests::{sample_info_hash, sample_peer}; @@ -752,13 +842,13 @@ mod tests { let info_hash = sample_info_hash(); let peer = sample_peer(); - let _number_of_downloads_increased = swarms.handle_announcement(&info_hash, &peer, None); + swarms.handle_announcement(&info_hash, &peer, None).await.unwrap(); let torrent_entries = swarms.get_paginated(None); assert_eq!(torrent_entries.len(), 1); - let torrent_entry = torrent_entries.first().unwrap().1.clone(); + let torrent_entry = torrent_entry_info(torrent_entries.first().unwrap().1.clone()).await; assert_eq!( TorrentEntryInfo { @@ -770,7 +860,7 @@ mod tests { peers: vec!(peer), number_of_peers: 1 }, - torrent_entry.into() + torrent_entry ); } @@ -780,7 +870,9 @@ mod tests { use torrust_tracker_primitives::pagination::Pagination; use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; - use crate::swarms::tests::the_in_memory_torrent_repository::returning_torrent_entries::TorrentEntryInfo; + use crate::swarms::tests::the_swarm_repository::returning_torrent_entries::{ + torrent_entry_info, TorrentEntryInfo, + }; use crate::swarms::Swarms; use 
crate::tests::{ sample_info_hash_alphabetically_ordered_after_sample_info_hash_one, sample_info_hash_one, @@ -794,19 +886,19 @@ mod tests { // Insert one torrent entry let info_hash_one = sample_info_hash_one(); let peer_one = sample_peer_one(); - let _number_of_downloads_increased = swarms.handle_announcement(&info_hash_one, &peer_one, None); + swarms.handle_announcement(&info_hash_one, &peer_one, None).await.unwrap(); // Insert another torrent entry let info_hash_one = sample_info_hash_alphabetically_ordered_after_sample_info_hash_one(); let peer_two = sample_peer_two(); - let _number_of_downloads_increased = swarms.handle_announcement(&info_hash_one, &peer_two, None); + swarms.handle_announcement(&info_hash_one, &peer_two, None).await.unwrap(); // Get only the first page where page size is 1 let torrent_entries = swarms.get_paginated(Some(&Pagination { offset: 0, limit: 1 })); assert_eq!(torrent_entries.len(), 1); - let torrent_entry = torrent_entries.first().unwrap().1.clone(); + let torrent_entry_info = torrent_entry_info(torrent_entries.first().unwrap().1.clone()).await; assert_eq!( TorrentEntryInfo { @@ -818,7 +910,7 @@ mod tests { peers: vec!(peer_one), number_of_peers: 1 }, - torrent_entry.into() + torrent_entry_info ); } @@ -829,19 +921,19 @@ mod tests { // Insert one torrent entry let info_hash_one = sample_info_hash_one(); let peer_one = sample_peer_one(); - let _number_of_downloads_increased = swarms.handle_announcement(&info_hash_one, &peer_one, None); + swarms.handle_announcement(&info_hash_one, &peer_one, None).await.unwrap(); // Insert another torrent entry let info_hash_one = sample_info_hash_alphabetically_ordered_after_sample_info_hash_one(); let peer_two = sample_peer_two(); - let _number_of_downloads_increased = swarms.handle_announcement(&info_hash_one, &peer_two, None); + swarms.handle_announcement(&info_hash_one, &peer_two, None).await.unwrap(); // Get only the first page where page size is 1 let torrent_entries = swarms.get_paginated(Some(&Pagination { offset: 1, limit: 1 })); assert_eq!(torrent_entries.len(), 1); - let torrent_entry = torrent_entries.first().unwrap().1.clone(); + let torrent_entry_info = torrent_entry_info(torrent_entries.first().unwrap().1.clone()).await; assert_eq!( TorrentEntryInfo { @@ -853,7 +945,7 @@ mod tests { peers: vec!(peer_two), number_of_peers: 1 }, - torrent_entry.into() + torrent_entry_info ); } @@ -864,12 +956,12 @@ mod tests { // Insert one torrent entry let info_hash_one = sample_info_hash_one(); let peer_one = sample_peer_one(); - let _number_of_downloads_increased = swarms.handle_announcement(&info_hash_one, &peer_one, None); + swarms.handle_announcement(&info_hash_one, &peer_one, None).await.unwrap(); // Insert another torrent entry let info_hash_one = sample_info_hash_alphabetically_ordered_after_sample_info_hash_one(); let peer_two = sample_peer_two(); - let _number_of_downloads_increased = swarms.handle_announcement(&info_hash_one, &peer_two, None); + swarms.handle_announcement(&info_hash_one, &peer_two, None).await.unwrap(); // Get only the first page where page size is 1 let torrent_entries = swarms.get_paginated(Some(&Pagination { offset: 1, limit: 1 })); @@ -896,7 +988,7 @@ mod tests { async fn it_should_get_empty_aggregate_swarm_metadata_when_there_are_no_torrents() { let swarms = Arc::new(Swarms::default()); - let aggregate_swarm_metadata = swarms.get_aggregate_swarm_metadata().unwrap(); + let aggregate_swarm_metadata = swarms.get_aggregate_swarm_metadata().await.unwrap(); assert_eq!( aggregate_swarm_metadata, @@ 
-913,9 +1005,12 @@ mod tests {
     async fn it_should_return_the_aggregate_swarm_metadata_when_there_is_a_leecher() {
         let swarms = Arc::new(Swarms::default());
 
-        let _number_of_downloads_increased = swarms.handle_announcement(&sample_info_hash(), &leecher(), None);
+        swarms
+            .handle_announcement(&sample_info_hash(), &leecher(), None)
+            .await
+            .unwrap();
 
-        let aggregate_swarm_metadata = swarms.get_aggregate_swarm_metadata().unwrap();
+        let aggregate_swarm_metadata = swarms.get_aggregate_swarm_metadata().await.unwrap();
 
         assert_eq!(
             aggregate_swarm_metadata,
@@ -932,9 +1027,12 @@ mod tests {
     async fn it_should_return_the_aggregate_swarm_metadata_when_there_is_a_seeder() {
         let swarms = Arc::new(Swarms::default());
 
-        let _number_of_downloads_increased = swarms.handle_announcement(&sample_info_hash(), &seeder(), None);
+        swarms
+            .handle_announcement(&sample_info_hash(), &seeder(), None)
+            .await
+            .unwrap();
 
-        let aggregate_swarm_metadata = swarms.get_aggregate_swarm_metadata().unwrap();
+        let aggregate_swarm_metadata = swarms.get_aggregate_swarm_metadata().await.unwrap();
 
         assert_eq!(
             aggregate_swarm_metadata,
@@ -951,9 +1049,12 @@ mod tests {
     async fn it_should_return_the_aggregate_swarm_metadata_when_there_is_a_completed_peer() {
         let swarms = Arc::new(Swarms::default());
 
-        let _number_of_downloads_increased = swarms.handle_announcement(&sample_info_hash(), &complete_peer(), None);
+        swarms
+            .handle_announcement(&sample_info_hash(), &complete_peer(), None)
+            .await
+            .unwrap();
 
-        let aggregate_swarm_metadata = swarms.get_aggregate_swarm_metadata().unwrap();
+        let aggregate_swarm_metadata = swarms.get_aggregate_swarm_metadata().await.unwrap();
 
         assert_eq!(
             aggregate_swarm_metadata,
@@ -972,12 +1073,15 @@ mod tests {
         let start_time = std::time::Instant::now();
 
         for i in 0..1_000_000 {
-            let _number_of_downloads_increased = swarms.handle_announcement(&gen_seeded_infohash(&i), &leecher(), None);
+            swarms
+                .handle_announcement(&gen_seeded_infohash(&i), &leecher(), None)
+                .await
+                .unwrap();
         }
         let result_a = start_time.elapsed();
 
         let start_time = std::time::Instant::now();
-        let aggregate_swarm_metadata = swarms.get_aggregate_swarm_metadata().unwrap();
+        let aggregate_swarm_metadata = swarms.get_aggregate_swarm_metadata().await.unwrap();
         let result_b = start_time.elapsed();
 
         assert_eq!(
@@ -991,6 +1095,59 @@ mod tests {
             "{result_a:?} {result_b:?}"
         );
     }
+
+    mod it_should_count_peerless_torrents {
+        use std::sync::Arc;
+
+        use torrust_tracker_primitives::DurationSinceUnixEpoch;
+
+        use crate::swarms::Swarms;
+        use crate::tests::{sample_info_hash, sample_peer};
+
+        #[tokio::test]
+        async fn no_peerless_torrents() {
+            let swarms = Arc::new(Swarms::default());
+            assert_eq!(swarms.count_peerless_torrents().await.unwrap(), 0);
+        }
+
+        #[tokio::test]
+        async fn one_peerless_torrent() {
+            let info_hash = sample_info_hash();
+            let peer = sample_peer();
+
+            let swarms = Arc::new(Swarms::default());
+            swarms.handle_announcement(&info_hash, &peer, None).await.unwrap();
+
+            let current_cutoff = peer.updated + DurationSinceUnixEpoch::from_secs(1);
+            swarms.remove_inactive_peers(current_cutoff).await.unwrap();
+
+            assert_eq!(swarms.count_peerless_torrents().await.unwrap(), 1);
+        }
+    }
+
+    mod it_should_count_peers {
+        use std::sync::Arc;
+
+        use crate::swarms::Swarms;
+        use crate::tests::{sample_info_hash, sample_peer};
+
+        #[tokio::test]
+        async fn no_peers() {
+            let swarms = Arc::new(Swarms::default());
+            assert_eq!(swarms.count_peers().await.unwrap(), 0);
+        }
+
+        #[tokio::test]
+        async fn one_peer() {
+            let
info_hash = sample_info_hash(); + let peer = sample_peer(); + + let swarms = Arc::new(Swarms::default()); + swarms.handle_announcement(&info_hash, &peer, None).await.unwrap(); + + assert_eq!(swarms.count_peers().await.unwrap(), 1); + } + } } mod returning_swarm_metadata { @@ -1008,9 +1165,9 @@ mod tests { let infohash = sample_info_hash(); - let _number_of_downloads_increased = swarms.handle_announcement(&infohash, &leecher(), None); + swarms.handle_announcement(&infohash, &leecher(), None).await.unwrap(); - let swarm_metadata = swarms.get_swarm_metadata_or_default(&infohash).unwrap(); + let swarm_metadata = swarms.get_swarm_metadata_or_default(&infohash).await.unwrap(); assert_eq!( swarm_metadata, @@ -1026,7 +1183,7 @@ mod tests { async fn it_should_return_zeroed_swarm_metadata_for_a_non_existing_torrent() { let swarms = Arc::new(Swarms::default()); - let swarm_metadata = swarms.get_swarm_metadata_or_default(&sample_info_hash()).unwrap(); + let swarm_metadata = swarms.get_swarm_metadata_or_default(&sample_info_hash()).await.unwrap(); assert_eq!(swarm_metadata, SwarmMetadata::zeroed()); } @@ -1039,7 +1196,7 @@ mod tests { use torrust_tracker_primitives::PersistentTorrents; use crate::swarms::Swarms; - use crate::tests::sample_info_hash; + use crate::tests::{leecher, sample_info_hash}; #[tokio::test] async fn it_should_allow_importing_persisted_torrent_entries() { @@ -1053,11 +1210,156 @@ mod tests { swarms.import_persistent(&persistent_torrents); - let swarm_metadata = swarms.get_swarm_metadata_or_default(&infohash).unwrap(); + let swarm_metadata = swarms.get_swarm_metadata_or_default(&infohash).await.unwrap(); // Only the number of downloads is persisted. assert_eq!(swarm_metadata.downloaded, 1); } + + #[tokio::test] + async fn it_should_allow_overwriting_a_previously_imported_persisted_torrent() { + // code-review: do we want to allow this? 
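The overwrite this test exercises falls out of plain map semantics: `PersistentTorrents` keys entries by info hash, so a second `insert` for the same hash replaces the first value. A tiny self-contained sketch of that assumption, with an ordinary `BTreeMap` standing in for `PersistentTorrents`:

use std::collections::BTreeMap;

fn main() {
    // Stand-in for `PersistentTorrents`: info hash -> number of downloads.
    let mut persistent_torrents: BTreeMap<&str, u32> = BTreeMap::new();

    persistent_torrents.insert("sample_info_hash", 1);
    // A second insert under the same key replaces the previous value.
    persistent_torrents.insert("sample_info_hash", 2);

    assert_eq!(persistent_torrents.get("sample_info_hash"), Some(&2));
}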
+
+        let swarms = Arc::new(Swarms::default());
+
+        let infohash = sample_info_hash();
+
+        let mut persistent_torrents = PersistentTorrents::default();
+
+        persistent_torrents.insert(infohash, 1);
+        persistent_torrents.insert(infohash, 2);
+
+        swarms.import_persistent(&persistent_torrents);
+
+        let swarm_metadata = swarms.get_swarm_metadata_or_default(&infohash).await.unwrap();
+
+        // It takes the last value
+        assert_eq!(swarm_metadata.downloaded, 2);
+    }
+
+    #[tokio::test]
+    async fn it_should_not_allow_importing_a_persisted_torrent_if_it_already_exists() {
+        let swarms = Arc::new(Swarms::default());
+
+        let infohash = sample_info_hash();
+
+        // Insert a new torrent entry
+        swarms.handle_announcement(&infohash, &leecher(), None).await.unwrap();
+        let initial_number_of_downloads = swarms.get_swarm_metadata_or_default(&infohash).await.unwrap().downloaded;
+
+        // Try to import the torrent entry
+        let new_number_of_downloads = initial_number_of_downloads + 1;
+        let mut persistent_torrents = PersistentTorrents::default();
+        persistent_torrents.insert(infohash, new_number_of_downloads);
+        swarms.import_persistent(&persistent_torrents);
+
+        // The number of downloads should not change
+        assert_eq!(
+            swarms.get_swarm_metadata_or_default(&infohash).await.unwrap().downloaded,
+            initial_number_of_downloads
+        );
+    }
+}
+}
+
+mod triggering_events {
+
+    use std::sync::Arc;
+
+    use torrust_tracker_primitives::peer::fixture::PeerBuilder;
+    use torrust_tracker_primitives::DurationSinceUnixEpoch;
+
+    use crate::event::sender::tests::{expect_event_sequence, MockEventSender};
+    use crate::event::Event;
+    use crate::swarms::Swarms;
+    use crate::tests::sample_info_hash;
+
+    #[tokio::test]
+    async fn it_should_trigger_an_event_when_a_new_torrent_is_added() {
+        let info_hash = sample_info_hash();
+        let peer = PeerBuilder::leecher().build();
+
+        let mut event_sender_mock = MockEventSender::new();
+
+        expect_event_sequence(
+            &mut event_sender_mock,
+            vec![
+                Event::TorrentAdded {
+                    info_hash,
+                    announcement: peer,
+                },
+                Event::PeerAdded { info_hash, peer },
+            ],
+        );
+
+        let swarms = Swarms::new(Some(Arc::new(event_sender_mock)));
+
+        swarms.handle_announcement(&info_hash, &peer, None).await.unwrap();
+    }
+
+    #[tokio::test]
+    async fn it_should_trigger_an_event_when_a_torrent_is_directly_removed() {
+        let info_hash = sample_info_hash();
+        let peer = PeerBuilder::leecher().build();
+
+        let mut event_sender_mock = MockEventSender::new();
+
+        expect_event_sequence(
+            &mut event_sender_mock,
+            vec![
+                Event::TorrentAdded {
+                    info_hash,
+                    announcement: peer,
+                },
+                Event::PeerAdded { info_hash, peer },
+                Event::TorrentRemoved { info_hash },
+            ],
+        );
+
+        let swarms = Swarms::new(Some(Arc::new(event_sender_mock)));
+
+        swarms.handle_announcement(&info_hash, &peer, None).await.unwrap();
+
+        swarms.remove(&info_hash).await.unwrap();
+    }
+
+    #[tokio::test]
+    async fn it_should_trigger_an_event_when_a_peerless_torrent_is_removed() {
+        let info_hash = sample_info_hash();
+        let peer = PeerBuilder::leecher().build();
+
+        let mut event_sender_mock = MockEventSender::new();
+
+        expect_event_sequence(
+            &mut event_sender_mock,
+            vec![
+                Event::TorrentAdded {
+                    info_hash,
+                    announcement: peer,
+                },
+                Event::PeerAdded { info_hash, peer },
+                Event::PeerRemoved { info_hash, peer },
+                Event::TorrentRemoved { info_hash },
+            ],
+        );
+
+        let swarms = Swarms::new(Some(Arc::new(event_sender_mock)));
+
+        // Add the new torrent
+        swarms.handle_announcement(&info_hash, &peer, None).await.unwrap();
+
+        // Remove the peer
+        let
current_cutoff = peer.updated + DurationSinceUnixEpoch::from_secs(1); + swarms.remove_inactive_peers(current_cutoff).await.unwrap(); + + // Remove peerless torrents + + let tracker_policy = torrust_tracker_configuration::TrackerPolicy { + remove_peerless_torrents: true, + ..Default::default() + }; + + swarms.remove_peerless_torrents(&tracker_policy).await.unwrap(); } } } diff --git a/packages/torrent-repository/tests/swarm/mod.rs b/packages/torrent-repository/tests/swarm/mod.rs index d529b0243..cb4009ba9 100644 --- a/packages/torrent-repository/tests/swarm/mod.rs +++ b/packages/torrent-repository/tests/swarm/mod.rs @@ -3,6 +3,7 @@ use std::ops::Sub; use std::time::Duration; use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; +use bittorrent_primitives::info_hash::InfoHash; use rstest::{fixture, rstest}; use torrust_tracker_clock::clock::stopped::Stopped as _; use torrust_tracker_clock::clock::{self, Time as _}; @@ -16,7 +17,7 @@ use crate::CurrentClock; #[fixture] fn swarm() -> Swarm { - Swarm::default() + Swarm::new(&InfoHash::default(), 0, None) } #[fixture] @@ -47,39 +48,39 @@ pub enum Makes { Three, } -fn make(swarm: &mut Swarm, makes: &Makes) -> Vec { +async fn make(swarm: &mut Swarm, makes: &Makes) -> Vec { match makes { Makes::Empty => vec![], Makes::Started => { let peer = a_started_peer(1); - swarm.handle_announcement(&peer); + swarm.handle_announcement(&peer).await; vec![peer] } Makes::Completed => { let peer = a_completed_peer(2); - swarm.handle_announcement(&peer); + swarm.handle_announcement(&peer).await; vec![peer] } Makes::Downloaded => { let mut peer = a_started_peer(3); - swarm.handle_announcement(&peer); + swarm.handle_announcement(&peer).await; peer.event = AnnounceEvent::Completed; peer.left = NumberOfBytes::new(0); - swarm.handle_announcement(&peer); + swarm.handle_announcement(&peer).await; vec![peer] } Makes::Three => { let peer_1 = a_started_peer(1); - swarm.handle_announcement(&peer_1); + swarm.handle_announcement(&peer_1).await; let peer_2 = a_completed_peer(2); - swarm.handle_announcement(&peer_2); + swarm.handle_announcement(&peer_2).await; let mut peer_3 = a_started_peer(3); - swarm.handle_announcement(&peer_3); + swarm.handle_announcement(&peer_3).await; peer_3.event = AnnounceEvent::Completed; peer_3.left = NumberOfBytes::new(0); - swarm.handle_announcement(&peer_3); + swarm.handle_announcement(&peer_3).await; vec![peer_1, peer_2, peer_3] } } @@ -89,7 +90,7 @@ fn make(swarm: &mut Swarm, makes: &Makes) -> Vec { #[case::empty(&Makes::Empty)] #[tokio::test] async fn it_should_be_empty_by_default(#[values(swarm())] mut swarm: Swarm, #[case] makes: &Makes) { - make(&mut swarm, makes); + make(&mut swarm, makes).await; assert_eq!(swarm.len(), 0); } @@ -106,7 +107,7 @@ async fn it_should_check_if_entry_should_be_retained_based_on_the_tracker_policy #[case] makes: &Makes, #[values(policy_none(), policy_persist(), policy_remove(), policy_remove_persist())] policy: TrackerPolicy, ) { - make(&mut swarm, makes); + make(&mut swarm, makes).await; let has_peers = !swarm.is_empty(); let has_downloads = swarm.metadata().downloaded != 0; @@ -140,7 +141,7 @@ async fn it_should_check_if_entry_should_be_retained_based_on_the_tracker_policy #[case::three(&Makes::Three)] #[tokio::test] async fn it_should_get_peers_for_torrent_entry(#[values(swarm())] mut swarm: Swarm, #[case] makes: &Makes) { - let peers = make(&mut swarm, makes); + let peers = make(&mut swarm, makes).await; let torrent_peers = swarm.peers(None); @@ -159,11 +160,11 @@ async fn 
it_should_get_peers_for_torrent_entry(#[values(swarm())] mut swarm: Swa #[case::three(&Makes::Three)] #[tokio::test] async fn it_should_update_a_peer(#[values(swarm())] mut swarm: Swarm, #[case] makes: &Makes) { - make(&mut swarm, makes); + make(&mut swarm, makes).await; // Make and insert a new peer. let mut peer = a_started_peer(-1); - swarm.handle_announcement(&peer); + swarm.handle_announcement(&peer).await; // Get the Inserted Peer by Id. let peers = swarm.peers(None); @@ -176,7 +177,7 @@ async fn it_should_update_a_peer(#[values(swarm())] mut swarm: Swarm, #[case] ma // Announce "Completed" torrent download event. peer.event = AnnounceEvent::Completed; - swarm.handle_announcement(&peer); + swarm.handle_announcement(&peer).await; // Get the Updated Peer by Id. let peers = swarm.peers(None); @@ -198,11 +199,11 @@ async fn it_should_update_a_peer(#[values(swarm())] mut swarm: Swarm, #[case] ma async fn it_should_remove_a_peer_upon_stopped_announcement(#[values(swarm())] mut swarm: Swarm, #[case] makes: &Makes) { use torrust_tracker_primitives::peer::ReadInfo as _; - make(&mut swarm, makes); + make(&mut swarm, makes).await; let mut peer = a_started_peer(-1); - swarm.handle_announcement(&peer); + swarm.handle_announcement(&peer).await; // The started peer should be inserted. let peers = swarm.peers(None); @@ -215,7 +216,7 @@ async fn it_should_remove_a_peer_upon_stopped_announcement(#[values(swarm())] mu // Change peer to "Stopped" and insert. peer.event = AnnounceEvent::Stopped; - swarm.handle_announcement(&peer); + swarm.handle_announcement(&peer).await; // It should be removed now. let peers = swarm.peers(None); @@ -237,7 +238,7 @@ async fn it_should_handle_a_peer_completed_announcement_and_update_the_downloade #[values(swarm())] mut torrent: Swarm, #[case] makes: &Makes, ) { - make(&mut torrent, makes); + make(&mut torrent, makes).await; let downloaded = torrent.metadata().downloaded; let peers = torrent.peers(None); @@ -248,7 +249,7 @@ async fn it_should_handle_a_peer_completed_announcement_and_update_the_downloade // Announce "Completed" torrent download event. 
peer.event = AnnounceEvent::Completed; - torrent.handle_announcement(&peer); + torrent.handle_announcement(&peer).await; let stats = torrent.metadata(); if is_already_completed { @@ -265,7 +266,7 @@ async fn it_should_handle_a_peer_completed_announcement_and_update_the_downloade #[case::three(&Makes::Three)] #[tokio::test] async fn it_should_update_a_peer_as_a_seeder(#[values(swarm())] mut swarm: Swarm, #[case] makes: &Makes) { - let peers = make(&mut swarm, makes); + let peers = make(&mut swarm, makes).await; let completed = u32::try_from(peers.iter().filter(|p| p.is_seeder()).count()).expect("it_should_not_be_so_many"); let peers = swarm.peers(None); @@ -275,7 +276,7 @@ async fn it_should_update_a_peer_as_a_seeder(#[values(swarm())] mut swarm: Swarm // Set Bytes Left to Zero peer.left = NumberOfBytes::new(0); - swarm.handle_announcement(&peer); + swarm.handle_announcement(&peer).await; let stats = swarm.metadata(); if is_already_non_left { @@ -294,7 +295,7 @@ async fn it_should_update_a_peer_as_a_seeder(#[values(swarm())] mut swarm: Swarm #[case::three(&Makes::Three)] #[tokio::test] async fn it_should_update_a_peer_as_incomplete(#[values(swarm())] mut swarm: Swarm, #[case] makes: &Makes) { - let peers = make(&mut swarm, makes); + let peers = make(&mut swarm, makes).await; let incomplete = u32::try_from(peers.iter().filter(|p| !p.is_seeder()).count()).expect("it should not be so many"); let peers = swarm.peers(None); @@ -304,7 +305,7 @@ async fn it_should_update_a_peer_as_incomplete(#[values(swarm())] mut swarm: Swa // Set Bytes Left to no Zero peer.left = NumberOfBytes::new(1); - swarm.handle_announcement(&peer); + swarm.handle_announcement(&peer).await; let stats = swarm.metadata(); if completed_already { @@ -323,7 +324,7 @@ async fn it_should_update_a_peer_as_incomplete(#[values(swarm())] mut swarm: Swa #[case::three(&Makes::Three)] #[tokio::test] async fn it_should_get_peers_excluding_the_client_socket(#[values(swarm())] mut swarm: Swarm, #[case] makes: &Makes) { - make(&mut swarm, makes); + make(&mut swarm, makes).await; let peers = swarm.peers(None); let mut peer = **peers.first().expect("there should be a peer"); @@ -338,7 +339,7 @@ async fn it_should_get_peers_excluding_the_client_socket(#[values(swarm())] mut // set the address to the socket. peer.peer_addr = socket; - swarm.handle_announcement(&peer); // Add peer + swarm.handle_announcement(&peer).await; // Add peer // It should not include the peer that has the same socket. 
assert!(!swarm.peers_excluding(&socket, None).contains(&peer.into())); @@ -352,12 +353,12 @@ async fn it_should_get_peers_excluding_the_client_socket(#[values(swarm())] mut #[case::three(&Makes::Three)] #[tokio::test] async fn it_should_limit_the_number_of_peers_returned(#[values(swarm())] mut swarm: Swarm, #[case] makes: &Makes) { - make(&mut swarm, makes); + make(&mut swarm, makes).await; // We add one more peer than the scrape limit for peer_number in 1..=74 + 1 { let peer = a_started_peer(peer_number); - swarm.handle_announcement(&peer); + swarm.handle_announcement(&peer).await; } let peers = swarm.peers(Some(TORRENT_PEERS_LIMIT)); @@ -376,7 +377,7 @@ async fn it_should_remove_inactive_peers_beyond_cutoff(#[values(swarm())] mut sw const TIMEOUT: Duration = Duration::from_secs(120); const EXPIRE: Duration = Duration::from_secs(121); - let peers = make(&mut swarm, makes); + let peers = make(&mut swarm, makes).await; let mut peer = a_completed_peer(-1); @@ -385,12 +386,12 @@ async fn it_should_remove_inactive_peers_beyond_cutoff(#[values(swarm())] mut sw peer.updated = now.sub(EXPIRE); - swarm.handle_announcement(&peer); + swarm.handle_announcement(&peer).await; assert_eq!(swarm.len(), peers.len() + 1); let current_cutoff = CurrentClock::now_sub(&TIMEOUT).unwrap_or_default(); - swarm.remove_inactive(current_cutoff); + swarm.remove_inactive(current_cutoff).await; assert_eq!(swarm.len(), peers.len()); } diff --git a/packages/torrent-repository/tests/swarms/mod.rs b/packages/torrent-repository/tests/swarms/mod.rs index 8e58b9e76..780d6cd4c 100644 --- a/packages/torrent-repository/tests/swarms/mod.rs +++ b/packages/torrent-repository/tests/swarms/mod.rs @@ -3,16 +3,21 @@ use std::hash::{DefaultHasher, Hash, Hasher}; use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; use bittorrent_primitives::info_hash::InfoHash; +use futures::future::join_all; use rstest::{fixture, rstest}; use torrust_tracker_configuration::TrackerPolicy; use torrust_tracker_primitives::pagination::Pagination; use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; use torrust_tracker_primitives::PersistentTorrents; use torrust_tracker_torrent_repository::swarm::Swarm; -use torrust_tracker_torrent_repository::{LockTrackedTorrent, Swarms}; +use torrust_tracker_torrent_repository::Swarms; use crate::common::torrent_peer_builder::{a_completed_peer, a_started_peer}; +fn swarm() -> Swarm { + Swarm::new(&InfoHash::default(), 0, None) +} + #[fixture] fn swarms() -> Swarms { Swarms::default() @@ -27,53 +32,53 @@ fn empty() -> Entries { #[fixture] fn default() -> Entries { - vec![(InfoHash::default(), Swarm::default())] + vec![(InfoHash::default(), swarm())] } #[fixture] -fn started() -> Entries { - let mut swarm = Swarm::default(); - swarm.handle_announcement(&a_started_peer(1)); +async fn started() -> Entries { + let mut swarm = swarm(); + swarm.handle_announcement(&a_started_peer(1)).await; vec![(InfoHash::default(), swarm)] } #[fixture] -fn completed() -> Entries { - let mut swarm = Swarm::default(); - swarm.handle_announcement(&a_completed_peer(2)); +async fn completed() -> Entries { + let mut swarm = swarm(); + swarm.handle_announcement(&a_completed_peer(2)).await; vec![(InfoHash::default(), swarm)] } #[fixture] -fn downloaded() -> Entries { - let mut swarm = Swarm::default(); +async fn downloaded() -> Entries { + let mut swarm = swarm(); let mut peer = a_started_peer(3); - swarm.handle_announcement(&peer); + swarm.handle_announcement(&peer).await; peer.event = AnnounceEvent::Completed; peer.left = 
NumberOfBytes::new(0); - swarm.handle_announcement(&peer); + swarm.handle_announcement(&peer).await; vec![(InfoHash::default(), swarm)] } #[fixture] -fn three() -> Entries { - let mut started = Swarm::default(); +async fn three() -> Entries { + let mut started = swarm(); let started_h = &mut DefaultHasher::default(); - started.handle_announcement(&a_started_peer(1)); + started.handle_announcement(&a_started_peer(1)).await; started.hash(started_h); - let mut completed = Swarm::default(); + let mut completed = swarm(); let completed_h = &mut DefaultHasher::default(); - completed.handle_announcement(&a_completed_peer(2)); + completed.handle_announcement(&a_completed_peer(2)).await; completed.hash(completed_h); - let mut downloaded = Swarm::default(); + let mut downloaded = swarm(); let downloaded_h = &mut DefaultHasher::default(); let mut downloaded_peer = a_started_peer(3); - downloaded.handle_announcement(&downloaded_peer); + downloaded.handle_announcement(&downloaded_peer).await; downloaded_peer.event = AnnounceEvent::Completed; downloaded_peer.left = NumberOfBytes::new(0); - downloaded.handle_announcement(&downloaded_peer); + downloaded.handle_announcement(&downloaded_peer).await; downloaded.hash(downloaded_h); vec![ @@ -84,12 +89,12 @@ fn three() -> Entries { } #[fixture] -fn many_out_of_order() -> Entries { +async fn many_out_of_order() -> Entries { let mut entries: HashSet<(InfoHash, Swarm)> = HashSet::default(); for i in 0..408 { - let mut entry = Swarm::default(); - entry.handle_announcement(&a_started_peer(i)); + let mut entry = swarm(); + entry.handle_announcement(&a_started_peer(i)).await; entries.insert((InfoHash::from(&i), entry)); } @@ -99,12 +104,12 @@ fn many_out_of_order() -> Entries { } #[fixture] -fn many_hashed_in_order() -> Entries { +async fn many_hashed_in_order() -> Entries { let mut entries: BTreeMap = BTreeMap::default(); for i in 0..408 { - let mut entry = Swarm::default(); - entry.handle_announcement(&a_started_peer(i)); + let mut entry = swarm(); + entry.handle_announcement(&a_started_peer(i)).await; let hash: &mut DefaultHasher = &mut DefaultHasher::default(); hash.write_i32(i); @@ -191,21 +196,18 @@ fn policy_remove_persist() -> TrackerPolicy { #[rstest] #[case::empty(empty())] #[case::default(default())] -#[case::started(started())] -#[case::completed(completed())] -#[case::downloaded(downloaded())] -#[case::three(three())] -#[case::out_of_order(many_out_of_order())] -#[case::in_order(many_hashed_in_order())] +#[case::started(started().await)] +#[case::completed(completed().await)] +#[case::downloaded(downloaded().await)] +#[case::three(three().await)] +#[case::out_of_order(many_out_of_order().await)] +#[case::in_order(many_hashed_in_order().await)] #[tokio::test] async fn it_should_get_a_torrent_entry(#[values(swarms())] repo: Swarms, #[case] entries: Entries) { make(&repo, &entries); if let Some((info_hash, swarm)) = entries.first() { - assert_eq!( - Some(repo.get(info_hash).unwrap().lock_or_panic().clone()), - Some(swarm.clone()) - ); + assert_eq!(Some(repo.get(info_hash).unwrap().lock().await.clone()), Some(swarm.clone())); } else { assert!(repo.get(&InfoHash::default()).is_none()); } @@ -214,23 +216,23 @@ async fn it_should_get_a_torrent_entry(#[values(swarms())] repo: Swarms, #[case] #[rstest] #[case::empty(empty())] #[case::default(default())] -#[case::started(started())] -#[case::completed(completed())] -#[case::downloaded(downloaded())] -#[case::three(three())] -#[case::out_of_order(many_out_of_order())] -#[case::in_order(many_hashed_in_order())] 
+#[case::started(started().await)] +#[case::completed(completed().await)] +#[case::downloaded(downloaded().await)] +#[case::three(three().await)] +#[case::out_of_order(many_out_of_order().await)] +#[case::in_order(many_hashed_in_order().await)] #[tokio::test] async fn it_should_get_paginated_entries_in_a_stable_or_sorted_order( #[values(swarms())] repo: Swarms, #[case] entries: Entries, - many_out_of_order: Entries, + #[future] many_out_of_order: Entries, ) { make(&repo, &entries); let entries_a = repo.get_paginated(None).iter().map(|(i, _)| *i).collect::>(); - make(&repo, &many_out_of_order); + make(&repo, &many_out_of_order.await); let entries_b = repo.get_paginated(None).iter().map(|(i, _)| *i).collect::>(); @@ -247,12 +249,12 @@ async fn it_should_get_paginated_entries_in_a_stable_or_sorted_order( #[rstest] #[case::empty(empty())] #[case::default(default())] -#[case::started(started())] -#[case::completed(completed())] -#[case::downloaded(downloaded())] -#[case::three(three())] -#[case::out_of_order(many_out_of_order())] -#[case::in_order(many_hashed_in_order())] +#[case::started(started().await)] +#[case::completed(completed().await)] +#[case::downloaded(downloaded().await)] +#[case::three(three().await)] +#[case::out_of_order(many_out_of_order().await)] +#[case::in_order(many_hashed_in_order().await)] #[tokio::test] async fn it_should_get_paginated( #[values(swarms())] repo: Swarms, @@ -267,11 +269,15 @@ async fn it_should_get_paginated( match paginated { // it should return empty if limit is zero. Pagination { limit: 0, .. } => { - let swarms: Vec<(InfoHash, Swarm)> = repo - .get_paginated(Some(&paginated)) - .iter() - .map(|(i, swarm_handle)| (*i, swarm_handle.lock_or_panic().clone())) - .collect(); + let page = repo.get_paginated(Some(&paginated)); + + let futures = page.iter().map(|(i, swarm_handle)| { + let i = *i; + let swarm_handle = swarm_handle.clone(); + async move { (i, swarm_handle.lock().await.clone()) } + }); + + let swarms: Vec<(InfoHash, Swarm)> = join_all(futures).await; assert_eq!(swarms, vec![]); } @@ -287,7 +293,7 @@ async fn it_should_get_paginated( } } - // it should return the only the second entry if both the limit and the offset are one. + // it should return only the second entry if both the limit and the offset are one. Pagination { limit: 1, offset: 1 } => { if info_hashes.len() > 1 { let page = repo.get_paginated(Some(&paginated)); @@ -295,7 +301,7 @@ async fn it_should_get_paginated( assert_eq!(page[0].0, info_hashes[1]); } } - // the other cases are not yet tested. 
+ _ => {} } } @@ -303,12 +309,12 @@ async fn it_should_get_paginated( #[rstest] #[case::empty(empty())] #[case::default(default())] -#[case::started(started())] -#[case::completed(completed())] -#[case::downloaded(downloaded())] -#[case::three(three())] -#[case::out_of_order(many_out_of_order())] -#[case::in_order(many_hashed_in_order())] +#[case::started(started().await)] +#[case::completed(completed().await)] +#[case::downloaded(downloaded().await)] +#[case::three(three().await)] +#[case::out_of_order(many_out_of_order().await)] +#[case::in_order(many_hashed_in_order().await)] #[tokio::test] async fn it_should_get_metrics(#[values(swarms())] swarms: Swarms, #[case] entries: Entries) { use torrust_tracker_primitives::swarm_metadata::AggregateSwarmMetadata; @@ -326,18 +332,18 @@ async fn it_should_get_metrics(#[values(swarms())] swarms: Swarms, #[case] entri metrics.total_downloaded += u64::from(stats.downloaded); } - assert_eq!(swarms.get_aggregate_swarm_metadata().unwrap(), metrics); + assert_eq!(swarms.get_aggregate_swarm_metadata().await.unwrap(), metrics); } #[rstest] #[case::empty(empty())] #[case::default(default())] -#[case::started(started())] -#[case::completed(completed())] -#[case::downloaded(downloaded())] -#[case::three(three())] -#[case::out_of_order(many_out_of_order())] -#[case::in_order(many_hashed_in_order())] +#[case::started(started().await)] +#[case::completed(completed().await)] +#[case::downloaded(downloaded().await)] +#[case::three(three().await)] +#[case::out_of_order(many_out_of_order().await)] +#[case::in_order(many_hashed_in_order().await)] #[tokio::test] async fn it_should_import_persistent_torrents( #[values(swarms())] swarms: Swarms, @@ -346,12 +352,15 @@ async fn it_should_import_persistent_torrents( ) { make(&swarms, &entries); - let mut downloaded = swarms.get_aggregate_swarm_metadata().unwrap().total_downloaded; + let mut downloaded = swarms.get_aggregate_swarm_metadata().await.unwrap().total_downloaded; persistent_torrents.iter().for_each(|(_, d)| downloaded += u64::from(*d)); swarms.import_persistent(&persistent_torrents); - assert_eq!(swarms.get_aggregate_swarm_metadata().unwrap().total_downloaded, downloaded); + assert_eq!( + swarms.get_aggregate_swarm_metadata().await.unwrap().total_downloaded, + downloaded + ); for (entry, _) in persistent_torrents { assert!(swarms.get(&entry).is_some()); @@ -361,42 +370,42 @@ async fn it_should_import_persistent_torrents( #[rstest] #[case::empty(empty())] #[case::default(default())] -#[case::started(started())] -#[case::completed(completed())] -#[case::downloaded(downloaded())] -#[case::three(three())] -#[case::out_of_order(many_out_of_order())] -#[case::in_order(many_hashed_in_order())] +#[case::started(started().await)] +#[case::completed(completed().await)] +#[case::downloaded(downloaded().await)] +#[case::three(three().await)] +#[case::out_of_order(many_out_of_order().await)] +#[case::in_order(many_hashed_in_order().await)] #[tokio::test] async fn it_should_remove_an_entry(#[values(swarms())] swarms: Swarms, #[case] entries: Entries) { make(&swarms, &entries); for (info_hash, torrent) in entries { assert_eq!( - Some(swarms.get(&info_hash).unwrap().lock_or_panic().clone()), + Some(swarms.get(&info_hash).unwrap().lock().await.clone()), Some(torrent.clone()) ); assert_eq!( - Some(swarms.remove(&info_hash).unwrap().lock_or_panic().clone()), + Some(swarms.remove(&info_hash).await.unwrap().lock().await.clone()), Some(torrent) ); assert!(swarms.get(&info_hash).is_none()); - 
assert!(swarms.remove(&info_hash).is_none()); + assert!(swarms.remove(&info_hash).await.is_none()); } - assert_eq!(swarms.get_aggregate_swarm_metadata().unwrap().total_torrents, 0); + assert_eq!(swarms.get_aggregate_swarm_metadata().await.unwrap().total_torrents, 0); } #[rstest] #[case::empty(empty())] #[case::default(default())] -#[case::started(started())] -#[case::completed(completed())] -#[case::downloaded(downloaded())] -#[case::three(three())] -#[case::out_of_order(many_out_of_order())] -#[case::in_order(many_hashed_in_order())] +#[case::started(started().await)] +#[case::completed(completed().await)] +#[case::downloaded(downloaded().await)] +#[case::three(three().await)] +#[case::out_of_order(many_out_of_order().await)] +#[case::in_order(many_hashed_in_order().await)] #[tokio::test] async fn it_should_remove_inactive_peers(#[values(swarms())] swarms: Swarms, #[case] entries: Entries) { use std::ops::Sub as _; @@ -435,9 +444,9 @@ async fn it_should_remove_inactive_peers(#[values(swarms())] swarms: Swarms, #[c // Insert the infohash and peer into the repository // and verify there is an extra torrent entry. { - swarms.handle_announcement(&info_hash, &peer, None).unwrap(); + swarms.handle_announcement(&info_hash, &peer, None).await.unwrap(); assert_eq!( - swarms.get_aggregate_swarm_metadata().unwrap().total_torrents, + swarms.get_aggregate_swarm_metadata().await.unwrap().total_torrents, entries.len() as u64 + 1 ); } @@ -445,8 +454,8 @@ async fn it_should_remove_inactive_peers(#[values(swarms())] swarms: Swarms, #[c // Insert the infohash and peer into the repository // and verify the swarm metadata was updated. { - swarms.handle_announcement(&info_hash, &peer, None).unwrap(); - let stats = swarms.get_swarm_metadata(&info_hash).unwrap(); + swarms.handle_announcement(&info_hash, &peer, None).await.unwrap(); + let stats = swarms.get_swarm_metadata(&info_hash).await.unwrap(); assert_eq!( stats, Some(SwarmMetadata { @@ -460,7 +469,7 @@ async fn it_should_remove_inactive_peers(#[values(swarms())] swarms: Swarms, #[c // Verify that this new peer was inserted into the repository. { let lock_tracked_torrent = swarms.get(&info_hash).expect("it_should_get_some"); - let entry = lock_tracked_torrent.lock_or_panic(); + let entry = lock_tracked_torrent.lock().await; assert!(entry.peers(None).contains(&peer.into())); } @@ -468,13 +477,14 @@ async fn it_should_remove_inactive_peers(#[values(swarms())] swarms: Swarms, #[c { swarms .remove_inactive_peers(CurrentClock::now_sub(&TIMEOUT).expect("it should get a time passed")) + .await .unwrap(); } // Verify that the this peer was removed from the repository. 
 {
         let lock_tracked_torrent = swarms.get(&info_hash).expect("it_should_get_some");
-        let entry = lock_tracked_torrent.lock_or_panic();
+        let entry = lock_tracked_torrent.lock().await;
         assert!(!entry.peers(None).contains(&peer.into()));
     }
 }
 
@@ -482,12 +492,12 @@
 #[rstest]
 #[case::empty(empty())]
 #[case::default(default())]
-#[case::started(started())]
-#[case::completed(completed())]
-#[case::downloaded(downloaded())]
-#[case::three(three())]
-#[case::out_of_order(many_out_of_order())]
-#[case::in_order(many_hashed_in_order())]
+#[case::started(started().await)]
+#[case::completed(completed().await)]
+#[case::downloaded(downloaded().await)]
+#[case::three(three().await)]
+#[case::out_of_order(many_out_of_order().await)]
+#[case::in_order(many_hashed_in_order().await)]
 #[tokio::test]
 async fn it_should_remove_peerless_torrents(
     #[values(swarms())] swarms: Swarms,
@@ -496,13 +506,17 @@
 ) {
     make(&swarms, &entries);
 
-    swarms.remove_peerless_torrents(&policy).unwrap();
+    swarms.remove_peerless_torrents(&policy).await.unwrap();
+
+    let paginated = swarms.get_paginated(None); // Bind the page so the lazy futures below can borrow from it.
+
+    let futures = paginated.iter().map(|(i, swarm_handle)| {
+        let i = *i;
+        let swarm_handle = swarm_handle.clone();
+        async move { (i, swarm_handle.lock().await.clone()) }
+    });
 
-    let torrents: Vec<(InfoHash, Swarm)> = swarms
-        .get_paginated(None)
-        .iter()
-        .map(|(i, lock_tracked_torrent)| (*i, lock_tracked_torrent.lock_or_panic().clone()))
-        .collect();
+    let torrents: Vec<(InfoHash, Swarm)> = join_all(futures).await;
 
     for (_, entry) in torrents {
         assert!(entry.meets_retaining_policy(&policy));
diff --git a/packages/tracker-core/src/announce_handler.rs b/packages/tracker-core/src/announce_handler.rs
index fac0a38c8..a2e8db743 100644
--- a/packages/tracker-core/src/announce_handler.rs
+++ b/packages/tracker-core/src/announce_handler.rs
@@ -171,24 +171,29 @@ impl AnnounceHandler {
 
         peer.change_ip(&assign_ip_address_to_peer(remote_client_ip, self.config.net.external_ip));
 
-        let number_of_downloads_increased =
-            self.in_memory_torrent_repository
-                .upsert_peer(info_hash, peer, opt_persistent_torrent);
+        let number_of_downloads_increased = self
+            .in_memory_torrent_repository
+            .upsert_peer(info_hash, peer, opt_persistent_torrent)
+            .await;
 
         if self.config.tracker_policy.persistent_torrent_completed_stat && number_of_downloads_increased {
             self.db_torrent_repository.increase_number_of_downloads(info_hash)?;
         }
 
-        Ok(self.build_announce_data(info_hash, peer, peers_wanted))
+        Ok(self.build_announce_data(info_hash, peer, peers_wanted).await)
     }
 
     /// Builds the announce data for the peer making the request.
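With an async mutex behind every swarm handle, a page of swarms can no longer be locked inside a plain `map`/`collect`; the rewritten tests build one future per handle and await them together. A standalone sketch of that `join_all` pattern, assuming the `futures` and `tokio` crates:

use std::sync::Arc;

use futures::future::join_all;
use tokio::sync::Mutex;

fn main() {
    let rt = tokio::runtime::Runtime::new().expect("runtime");
    rt.block_on(async {
        let handles: Vec<Arc<Mutex<u32>>> = (0u32..3).map(|i| Arc::new(Mutex::new(i))).collect();

        // One future per handle; each clones the Arc and locks asynchronously.
        let futures = handles.iter().map(|handle| {
            let handle = handle.clone();
            async move { *handle.lock().await }
        });

        // Await all the lock-and-read futures at once.
        let values: Vec<u32> = join_all(futures).await;

        assert_eq!(values, vec![0, 1, 2]);
    });
}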
- fn build_announce_data(&self, info_hash: &InfoHash, peer: &peer::Peer, peers_wanted: &PeersWanted) -> AnnounceData { + async fn build_announce_data(&self, info_hash: &InfoHash, peer: &peer::Peer, peers_wanted: &PeersWanted) -> AnnounceData { let peers = self .in_memory_torrent_repository - .get_peers_for(info_hash, peer, peers_wanted.limit()); + .get_peers_for(info_hash, peer, peers_wanted.limit()) + .await; - let swarm_metadata = self.in_memory_torrent_repository.get_swarm_metadata_or_default(info_hash); + let swarm_metadata = self + .in_memory_torrent_repository + .get_swarm_metadata_or_default(info_hash) + .await; AnnounceData { peers, @@ -594,7 +599,7 @@ mod tests { use aquatic_udp_protocol::AnnounceEvent; use torrust_tracker_test_helpers::configuration; - use torrust_tracker_torrent_repository::LockTrackedTorrent; + use torrust_tracker_torrent_repository::Swarms; use crate::announce_handler::tests::the_announce_handler::peer_ip; use crate::announce_handler::{AnnounceHandler, PeersWanted}; @@ -613,7 +618,8 @@ mod tests { config.core.tracker_policy.persistent_torrent_completed_stat = true; let database = initialize_database(&config.core); - let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::default()); + let swarms = Arc::new(Swarms::default()); + let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::new(swarms)); let db_torrent_repository = Arc::new(DatabasePersistentTorrentRepository::new(&database)); let torrents_manager = Arc::new(TorrentsManager::new( &config.core, @@ -648,7 +654,7 @@ mod tests { assert_eq!(announce_data.stats.downloaded, 1); // Remove the newly updated torrent from memory - let _unused = in_memory_torrent_repository.remove(&info_hash); + let _unused = in_memory_torrent_repository.remove(&info_hash).await; torrents_manager.load_torrents_from_database().unwrap(); @@ -657,10 +663,10 @@ mod tests { .expect("it should be able to get entry"); // It persists the number of completed peers. 
- assert_eq!(torrent_entry.lock_or_panic().metadata().downloaded, 1); + assert_eq!(torrent_entry.lock().await.metadata().downloaded, 1); // It does not persist the peers - assert!(torrent_entry.lock_or_panic().is_empty()); + assert!(torrent_entry.lock().await.is_empty()); } } diff --git a/packages/tracker-core/src/container.rs b/packages/tracker-core/src/container.rs index 9f4d23802..f4fb272de 100644 --- a/packages/tracker-core/src/container.rs +++ b/packages/tracker-core/src/container.rs @@ -1,6 +1,7 @@ use std::sync::Arc; use torrust_tracker_configuration::Core; +use torrust_tracker_torrent_repository::container::TorrentRepositoryContainer; use crate::announce_handler::AnnounceHandler; use crate::authentication::handler::KeysHandler; @@ -36,7 +37,7 @@ pub struct TrackerCoreContainer { impl TrackerCoreContainer { #[must_use] - pub fn initialize(core_config: &Arc) -> Self { + pub fn initialize_from(core_config: &Arc, torrent_repository_container: &Arc) -> Self { let database = initialize_database(core_config); let in_memory_whitelist = Arc::new(InMemoryWhitelist::default()); let whitelist_authorization = Arc::new(WhitelistAuthorization::new(core_config, &in_memory_whitelist.clone())); @@ -48,7 +49,7 @@ impl TrackerCoreContainer { &db_key_repository.clone(), &in_memory_key_repository.clone(), )); - let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::default()); + let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::new(torrent_repository_container.swarms.clone())); let db_torrent_repository = Arc::new(DatabasePersistentTorrentRepository::new(&database)); let torrents_manager = Arc::new(TorrentsManager::new( diff --git a/packages/tracker-core/src/scrape_handler.rs b/packages/tracker-core/src/scrape_handler.rs index 5d78c7d90..443d989a6 100644 --- a/packages/tracker-core/src/scrape_handler.rs +++ b/packages/tracker-core/src/scrape_handler.rs @@ -112,7 +112,11 @@ impl ScrapeHandler { for info_hash in info_hashes { let swarm_metadata = match self.whitelist_authorization.authorize(info_hash).await { - Ok(()) => self.in_memory_torrent_repository.get_swarm_metadata_or_default(info_hash), + Ok(()) => { + self.in_memory_torrent_repository + .get_swarm_metadata_or_default(info_hash) + .await + } Err(_) => SwarmMetadata::zeroed(), }; scrape_data.add_file(info_hash, swarm_metadata); diff --git a/packages/tracker-core/src/torrent/manager.rs b/packages/tracker-core/src/torrent/manager.rs index aaac811f2..bc193bd4f 100644 --- a/packages/tracker-core/src/torrent/manager.rs +++ b/packages/tracker-core/src/torrent/manager.rs @@ -90,35 +90,36 @@ impl TorrentsManager { /// 2. If the tracker is configured to remove peerless torrents /// (`remove_peerless_torrents` is set), it removes entire torrent /// entries that have no active peers. 
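The cutoff fed to `remove_inactive_peers` below is simply "now minus `max_peer_timeout`": any peer whose `updated` timestamp is older than that instant is treated as inactive. A self-contained sketch of the arithmetic (the timeout value is illustrative, not the crate's default):

use std::time::{Duration, SystemTime, UNIX_EPOCH};

fn main() {
    let max_peer_timeout = Duration::from_secs(900); // illustrative policy value

    let now = SystemTime::now()
        .duration_since(UNIX_EPOCH)
        .expect("system clock before the Unix epoch");

    // Peers whose `updated` timestamp is older than this cutoff are dropped.
    let current_cutoff = now.checked_sub(max_peer_timeout).unwrap_or_default();

    assert!(current_cutoff <= now);
}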
- pub fn cleanup_torrents(&self) { - self.log_aggregate_swarm_metadata(); + pub async fn cleanup_torrents(&self) { + self.log_aggregate_swarm_metadata().await; - self.remove_inactive_peers(); + self.remove_inactive_peers().await; - self.log_aggregate_swarm_metadata(); + self.log_aggregate_swarm_metadata().await; - self.remove_peerless_torrents(); + self.remove_peerless_torrents().await; - self.log_aggregate_swarm_metadata(); + self.log_aggregate_swarm_metadata().await; } - fn remove_inactive_peers(&self) { + async fn remove_inactive_peers(&self) { let current_cutoff = CurrentClock::now_sub(&Duration::from_secs(u64::from(self.config.tracker_policy.max_peer_timeout))) .unwrap_or_default(); - self.in_memory_torrent_repository.remove_inactive_peers(current_cutoff); + self.in_memory_torrent_repository.remove_inactive_peers(current_cutoff).await; } - fn remove_peerless_torrents(&self) { + async fn remove_peerless_torrents(&self) { if self.config.tracker_policy.remove_peerless_torrents { self.in_memory_torrent_repository - .remove_peerless_torrents(&self.config.tracker_policy); + .remove_peerless_torrents(&self.config.tracker_policy) + .await; } } - fn log_aggregate_swarm_metadata(&self) { + async fn log_aggregate_swarm_metadata(&self) { // Pre-calculated data - let aggregate_swarm_metadata = self.in_memory_torrent_repository.get_aggregate_swarm_metadata(); + let aggregate_swarm_metadata = self.in_memory_torrent_repository.get_aggregate_swarm_metadata().await; tracing::info!(name: "pre_calculated_aggregate_swarm_metadata", torrents = aggregate_swarm_metadata.total_torrents, @@ -128,8 +129,8 @@ impl TorrentsManager { ); // Hot data (iterating over data structures) - let peerless_torrents = self.in_memory_torrent_repository.count_peerless_torrents(); - let peers = self.in_memory_torrent_repository.count_peers(); + let peerless_torrents = self.in_memory_torrent_repository.count_peerless_torrents().await; + let peers = self.in_memory_torrent_repository.count_peers().await; tracing::info!(name: "hot_aggregate_swarm_metadata", peerless_torrents = peerless_torrents, @@ -144,7 +145,7 @@ mod tests { use std::sync::Arc; use torrust_tracker_configuration::Core; - use torrust_tracker_torrent_repository::LockTrackedTorrent; + use torrust_tracker_torrent_repository::Swarms; use super::{DatabasePersistentTorrentRepository, TorrentsManager}; use crate::databases::setup::initialize_database; @@ -163,7 +164,8 @@ mod tests { } fn initialize_torrents_manager_with(config: Core) -> (Arc, Arc) { - let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::default()); + let swarms = Arc::new(Swarms::default()); + let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::new(swarms)); let database = initialize_database(&config); let database_persistent_torrent_repository = Arc::new(DatabasePersistentTorrentRepository::new(&database)); @@ -183,8 +185,8 @@ mod tests { ) } - #[test] - fn it_should_load_the_numbers_of_downloads_for_all_torrents_from_the_database() { + #[tokio::test] + async fn it_should_load_the_numbers_of_downloads_for_all_torrents_from_the_database() { let (torrents_manager, services) = initialize_torrents_manager(); let infohash = sample_info_hash(); @@ -198,7 +200,8 @@ mod tests { .in_memory_torrent_repository .get(&infohash) .unwrap() - .lock_or_panic() + .lock() + .await .metadata() .downloaded, 1 @@ -219,8 +222,8 @@ mod tests { use crate::torrent::manager::tests::{initialize_torrents_manager, initialize_torrents_manager_with}; use 
crate::torrent::repository::in_memory::InMemoryTorrentRepository; - #[test] - fn it_should_remove_peers_that_have_not_been_updated_after_a_cutoff_time() { + #[tokio::test] + async fn it_should_remove_peers_that_have_not_been_updated_after_a_cutoff_time() { let (torrents_manager, services) = initialize_torrents_manager(); let infohash = sample_info_hash(); @@ -230,7 +233,10 @@ // Add a peer to the torrent let mut peer = sample_peer(); peer.updated = DurationSinceUnixEpoch::new(0, 0); - let _number_of_downloads_increased = services.in_memory_torrent_repository.upsert_peer(&infohash, &peer, None); + let _number_of_downloads_increased = services + .in_memory_torrent_repository + .upsert_peer(&infohash, &peer, None) + .await; // Simulate the time has passed 1 second more than the max peer timeout. clock::Stopped::local_add(&Duration::from_secs( @@ -238,23 +244,25 @@ )) .unwrap(); - torrents_manager.cleanup_torrents(); + torrents_manager.cleanup_torrents().await; assert!(services.in_memory_torrent_repository.get(&infohash).is_none()); } - fn add_a_peerless_torrent(infohash: &InfoHash, in_memory_torrent_repository: &Arc<InMemoryTorrentRepository>) { + async fn add_a_peerless_torrent(infohash: &InfoHash, in_memory_torrent_repository: &Arc<InMemoryTorrentRepository>) { // Add a peer to the torrent let mut peer = sample_peer(); peer.updated = DurationSinceUnixEpoch::new(0, 0); - let _number_of_downloads_increased = in_memory_torrent_repository.upsert_peer(infohash, &peer, None); + let _number_of_downloads_increased = in_memory_torrent_repository.upsert_peer(infohash, &peer, None).await; // Remove the peer. The torrent is now peerless. - in_memory_torrent_repository.remove_inactive_peers(peer.updated.add(Duration::from_secs(1))); + in_memory_torrent_repository + .remove_inactive_peers(peer.updated.add(Duration::from_secs(1))) + .await; } - #[test] - fn it_should_remove_torrents_that_have_no_peers_when_it_is_configured_to_do_so() { + #[tokio::test] + async fn it_should_remove_torrents_that_have_no_peers_when_it_is_configured_to_do_so() { let mut config = ephemeral_configuration(); config.tracker_policy.remove_peerless_torrents = true; @@ -262,15 +270,15 @@ let infohash = sample_info_hash(); - add_a_peerless_torrent(&infohash, &services.in_memory_torrent_repository); + add_a_peerless_torrent(&infohash, &services.in_memory_torrent_repository).await; - torrents_manager.cleanup_torrents(); + torrents_manager.cleanup_torrents().await; assert!(services.in_memory_torrent_repository.get(&infohash).is_none()); } - #[test] - fn it_should_retain_peerless_torrents_when_it_is_configured_to_do_so() { + #[tokio::test] + async fn it_should_retain_peerless_torrents_when_it_is_configured_to_do_so() { let mut config = ephemeral_configuration(); config.tracker_policy.remove_peerless_torrents = false; @@ -278,9 +286,9 @@ let infohash = sample_info_hash(); - add_a_peerless_torrent(&infohash, &services.in_memory_torrent_repository); + add_a_peerless_torrent(&infohash, &services.in_memory_torrent_repository).await; - torrents_manager.cleanup_torrents(); + torrents_manager.cleanup_torrents().await; assert!(services.in_memory_torrent_repository.get(&infohash).is_some()); }
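The test helper above is where the ownership change is easiest to see: the repository no longer creates its own storage. A minimal sketch of the new relationship, using only items that the hunks import:

```rust
use std::sync::Arc;

use bittorrent_tracker_core::torrent::repository::in_memory::InMemoryTorrentRepository;
use torrust_tracker_torrent_repository::Swarms;

// The swarms data structure is created once, outside the repository, so any
// other component (metrics collectors, event listeners) can hold a clone of
// the same `Arc<Swarms>` that the repository reads and writes.
fn build_repository() -> (Arc<Swarms>, Arc<InMemoryTorrentRepository>) {
    let swarms = Arc::new(Swarms::default());
    let repository = Arc::new(InMemoryTorrentRepository::new(swarms.clone()));
    (swarms, repository)
}
```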
diff --git a/packages/tracker-core/src/torrent/repository/in_memory.rs b/packages/tracker-core/src/torrent/repository/in_memory.rs index ffb53edad..311480306 100644 --- a/packages/tracker-core/src/torrent/repository/in_memory.rs +++ b/packages/tracker-core/src/torrent/repository/in_memory.rs @@ -18,13 +18,18 @@ use torrust_tracker_torrent_repository::{SwarmHandle, Swarms}; /// /// Multiple implementations were considered, and the chosen implementation is /// used in production. Other implementations are kept for reference. -#[derive(Debug, Default)] +#[derive(Default)] pub struct InMemoryTorrentRepository { /// The underlying in-memory data structure that stores swarms data. swarms: Arc<Swarms>, } impl InMemoryTorrentRepository { + #[must_use] + pub fn new(swarms: Arc<Swarms>) -> Self { + Self { swarms } + } + /// Inserts or updates a peer in the torrent entry corresponding to the /// given infohash. /// @@ -44,7 +49,7 @@ /// /// This function panics if the underlying swarms return an error. #[must_use] - pub fn upsert_peer( + pub async fn upsert_peer( &self, info_hash: &InfoHash, peer: &peer::Peer, @@ -52,6 +57,7 @@ ) -> bool { self.swarms .handle_announcement(info_hash, peer, opt_persistent_torrent) + .await .expect("Failed to upsert the peer in swarms") } @@ -70,8 +76,8 @@ /// An `Option` containing the removed torrent entry if it existed. #[cfg(test)] #[must_use] - pub(crate) fn remove(&self, key: &InfoHash) -> Option<SwarmHandle> { - self.swarms.remove(key) + pub(crate) async fn remove(&self, key: &InfoHash) -> Option<SwarmHandle> { + self.swarms.remove(key).await } /// Removes inactive peers from all torrent entries. @@ -87,9 +93,10 @@ /// # Panics /// /// This function panics if the underlying swarms return an error. - pub(crate) fn remove_inactive_peers(&self, current_cutoff: DurationSinceUnixEpoch) { + pub(crate) async fn remove_inactive_peers(&self, current_cutoff: DurationSinceUnixEpoch) { self.swarms .remove_inactive_peers(current_cutoff) + .await .expect("Failed to remove inactive peers from swarms"); } @@ -106,9 +113,10 @@ /// # Panics /// /// This function panics if the underlying swarms return an error. - pub(crate) fn remove_peerless_torrents(&self, policy: &TrackerPolicy) { + pub(crate) async fn remove_peerless_torrents(&self, policy: &TrackerPolicy) { self.swarms .remove_peerless_torrents(policy) + .await .expect("Failed to remove peerless torrents from swarms"); } @@ -162,9 +170,10 @@ /// /// This function panics if the underlying swarms return an error. #[must_use] - pub(crate) fn get_swarm_metadata_or_default(&self, info_hash: &InfoHash) -> SwarmMetadata { + pub(crate) async fn get_swarm_metadata_or_default(&self, info_hash: &InfoHash) -> SwarmMetadata { self.swarms .get_swarm_metadata_or_default(info_hash) + .await .expect("Failed to get swarm metadata") } @@ -190,9 +199,10 @@ /// /// This function panics if the underlying swarms return an error. #[must_use] - pub(crate) fn get_peers_for(&self, info_hash: &InfoHash, peer: &peer::Peer, limit: usize) -> Vec<Arc<peer::Peer>> { + pub(crate) async fn get_peers_for(&self, info_hash: &InfoHash, peer: &peer::Peer, limit: usize) -> Vec<Arc<peer::Peer>> { self.swarms .get_peers_peers_excluding(info_hash, peer, max(limit, TORRENT_PEERS_LIMIT)) + .await .expect("Failed to get other peers in swarm") } @@ -214,10 +224,11 @@ /// /// This function panics if the underlying swarms return an error.
#[must_use] - pub fn get_torrent_peers(&self, info_hash: &InfoHash) -> Vec<Arc<peer::Peer>> { + pub async fn get_torrent_peers(&self, info_hash: &InfoHash) -> Vec<Arc<peer::Peer>> { // todo: pass the limit as an argument like `get_peers_for` self.swarms .get_swarm_peers(info_hash, TORRENT_PEERS_LIMIT) + .await .expect("Failed to get other peers in swarm") } @@ -235,9 +246,10 @@ /// /// This function panics if the underlying swarms return an error. #[must_use] - pub fn get_aggregate_swarm_metadata(&self) -> AggregateSwarmMetadata { + pub async fn get_aggregate_swarm_metadata(&self) -> AggregateSwarmMetadata { self.swarms .get_aggregate_swarm_metadata() + .await .expect("Failed to get aggregate swarm metadata") } @@ -247,9 +259,10 @@ /// /// This function panics if the underlying swarms return an error. #[must_use] - pub fn count_peerless_torrents(&self) -> usize { + pub async fn count_peerless_torrents(&self) -> usize { self.swarms .count_peerless_torrents() + .await .expect("Failed to count peerless torrents") } @@ -259,8 +272,8 @@ /// /// This function panics if the underlying swarms return an error. #[must_use] - pub fn count_peers(&self) -> usize { - self.swarms.count_peers().expect("Failed to count peers") + pub async fn count_peers(&self) -> usize { + self.swarms.count_peers().await.expect("Failed to count peers") } /// Imports persistent torrent data into the in-memory repository. diff --git a/packages/tracker-core/src/torrent/services.rs b/packages/tracker-core/src/torrent/services.rs index a35fd7aed..97694a80f 100644 --- a/packages/tracker-core/src/torrent/services.rs +++ b/packages/tracker-core/src/torrent/services.rs @@ -17,7 +17,6 @@ use std::sync::Arc; use bittorrent_primitives::info_hash::InfoHash; use torrust_tracker_primitives::pagination::Pagination; use torrust_tracker_primitives::peer; -use torrust_tracker_torrent_repository::LockTrackedTorrent; use crate::torrent::repository::in_memory::InMemoryTorrentRepository; @@ -94,14 +93,17 @@ pub struct BasicInfo { /// /// This function panics if the lock for the torrent entry cannot be obtained. #[must_use] -pub fn get_torrent_info(in_memory_torrent_repository: &Arc<InMemoryTorrentRepository>, info_hash: &InfoHash) -> Option<Info> { +pub async fn get_torrent_info( + in_memory_torrent_repository: &Arc<InMemoryTorrentRepository>, + info_hash: &InfoHash, +) -> Option<Info> { let torrent_entry_option = in_memory_torrent_repository.get(info_hash); let torrent_entry = torrent_entry_option?; - let stats = torrent_entry.lock_or_panic().metadata(); + let stats = torrent_entry.lock().await.metadata(); - let peers = torrent_entry.lock_or_panic().peers(None); + let peers = torrent_entry.lock().await.peers(None); let peers = Some(peers.iter().map(|peer| (**peer)).collect()); @@ -136,14 +138,14 @@ pub fn get_torrent_info( -pub fn get_torrents_page( +pub async fn get_torrents_page( in_memory_torrent_repository: &Arc<InMemoryTorrentRepository>, pagination: Option<&Pagination>, ) -> Vec<BasicInfo> { let mut basic_infos: Vec<BasicInfo> = vec![]; for (info_hash, torrent_entry) in in_memory_torrent_repository.get_paginated(pagination) { - let stats = torrent_entry.lock_or_panic().metadata(); + let stats = torrent_entry.lock().await.metadata(); basic_infos.push(BasicInfo { info_hash,
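After these hunks the whole torrent services API is async, so callers simply chain `.await`, as the updated tests below show. A usage sketch — the module path for `get_torrent_info` and the `seeders` field on the returned `Info` are assumed from context, not confirmed by the diff:

```rust
use std::str::FromStr;
use std::sync::Arc;

use bittorrent_primitives::info_hash::InfoHash;
use bittorrent_tracker_core::torrent::repository::in_memory::InMemoryTorrentRepository;
use bittorrent_tracker_core::torrent::services::get_torrent_info; // path assumed

async fn print_seeders(repo: &Arc<InMemoryTorrentRepository>) {
    let info_hash = InfoHash::from_str("9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d").unwrap(); // DevSkim: ignore DS173237

    // `get_torrent_info` now returns a future; `None` means the torrent is unknown.
    if let Some(info) = get_torrent_info(repo, &info_hash).await {
        println!("seeders: {}", info.seeders);
    }
}
```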
@@ -178,19 +180,21 @@ /// /// This function panics if the lock for the torrent entry cannot be obtained. #[must_use] -pub fn get_torrents(in_memory_torrent_repository: &Arc<InMemoryTorrentRepository>, info_hashes: &[InfoHash]) -> Vec<BasicInfo> { +pub async fn get_torrents( + in_memory_torrent_repository: &Arc<InMemoryTorrentRepository>, + info_hashes: &[InfoHash], +) -> Vec<BasicInfo> { let mut basic_infos: Vec<BasicInfo> = vec![]; for info_hash in info_hashes { - if let Some(stats) = in_memory_torrent_repository - .get(info_hash) - .map(|torrent_entry| torrent_entry.lock_or_panic().metadata()) - { + if let Some(torrent_entry) = in_memory_torrent_repository.get(info_hash) { + let metadata = torrent_entry.lock().await.metadata(); + basic_infos.push(BasicInfo { info_hash: *info_hash, - seeders: u64::from(stats.complete), - completed: u64::from(stats.downloaded), - leechers: u64::from(stats.incomplete), + seeders: u64::from(metadata.complete), + completed: u64::from(metadata.downloaded), + leechers: u64::from(metadata.incomplete), }); } } @@ -235,7 +239,8 @@ mod tests { let torrent_info = get_torrent_info( &in_memory_torrent_repository, &InfoHash::from_str("0b3aea4adc213ce32295be85d3883a63bca25446").unwrap(), // DevSkim: ignore DS173237 - ); + ) + .await; assert!(torrent_info.is_none()); } @@ -246,9 +251,11 @@ let hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); // DevSkim: ignore DS173237 let info_hash = InfoHash::from_str(&hash).unwrap(); - let _number_of_downloads_increased = in_memory_torrent_repository.upsert_peer(&info_hash, &sample_peer(), None); + let _number_of_downloads_increased = in_memory_torrent_repository + .upsert_peer(&info_hash, &sample_peer(), None) + .await; - let torrent_info = get_torrent_info(&in_memory_torrent_repository, &info_hash).unwrap(); + let torrent_info = get_torrent_info(&in_memory_torrent_repository, &info_hash).await.unwrap(); assert_eq!( torrent_info, @@ -278,7 +285,7 @@ async fn it_should_return_an_empty_result_if_the_tracker_does_not_have_any_torrent() { let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::default()); - let torrents = get_torrents_page(&in_memory_torrent_repository, Some(&Pagination::default())); + let torrents = get_torrents_page(&in_memory_torrent_repository, Some(&Pagination::default())).await; assert_eq!(torrents, vec![]); } @@ -290,9 +297,11 @@ let hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); // DevSkim: ignore DS173237 let info_hash = InfoHash::from_str(&hash).unwrap(); - let _number_of_downloads_increased = in_memory_torrent_repository.upsert_peer(&info_hash, &sample_peer(), None); + let _number_of_downloads_increased = in_memory_torrent_repository + .upsert_peer(&info_hash, &sample_peer(), None) + .await; - let torrents = get_torrents_page(&in_memory_torrent_repository, Some(&Pagination::default())); + let torrents = get_torrents_page(&in_memory_torrent_repository, Some(&Pagination::default())).await; assert_eq!( torrents, @@ -315,13 +324,17 @@ let hash2 = "03840548643af2a7b63a9f5cbca348bc7150ca3a".to_owned(); // DevSkim: ignore DS173237 let info_hash2 = InfoHash::from_str(&hash2).unwrap(); - let _number_of_downloads_increased = in_memory_torrent_repository.upsert_peer(&info_hash1, &sample_peer(), None); - let _number_of_downloads_increased = in_memory_torrent_repository.upsert_peer(&info_hash2, &sample_peer(), None); + let _number_of_downloads_increased = in_memory_torrent_repository + .upsert_peer(&info_hash1, &sample_peer(), None) + .await; + let _number_of_downloads_increased = in_memory_torrent_repository + .upsert_peer(&info_hash2, &sample_peer(), None) + .await; let offset = 0; let limit = 1; - let torrents =
get_torrents_page(&in_memory_torrent_repository, Some(&Pagination::new(offset, limit))); + let torrents = get_torrents_page(&in_memory_torrent_repository, Some(&Pagination::new(offset, limit))).await; assert_eq!(torrents.len(), 1); } @@ -336,13 +349,17 @@ let hash2 = "03840548643af2a7b63a9f5cbca348bc7150ca3a".to_owned(); // DevSkim: ignore DS173237 let info_hash2 = InfoHash::from_str(&hash2).unwrap(); - let _number_of_downloads_increased = in_memory_torrent_repository.upsert_peer(&info_hash1, &sample_peer(), None); - let _number_of_downloads_increased = in_memory_torrent_repository.upsert_peer(&info_hash2, &sample_peer(), None); + let _number_of_downloads_increased = in_memory_torrent_repository + .upsert_peer(&info_hash1, &sample_peer(), None) + .await; + let _number_of_downloads_increased = in_memory_torrent_repository + .upsert_peer(&info_hash2, &sample_peer(), None) + .await; let offset = 1; let limit = 4000; - let torrents = get_torrents_page(&in_memory_torrent_repository, Some(&Pagination::new(offset, limit))); + let torrents = get_torrents_page(&in_memory_torrent_repository, Some(&Pagination::new(offset, limit))).await; assert_eq!(torrents.len(), 1); assert_eq!( @@ -362,13 +379,17 @@ let hash1 = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); // DevSkim: ignore DS173237 let info_hash1 = InfoHash::from_str(&hash1).unwrap(); - let _number_of_downloads_increased = in_memory_torrent_repository.upsert_peer(&info_hash1, &sample_peer(), None); + let _number_of_downloads_increased = in_memory_torrent_repository + .upsert_peer(&info_hash1, &sample_peer(), None) + .await; let hash2 = "03840548643af2a7b63a9f5cbca348bc7150ca3a".to_owned(); // DevSkim: ignore DS173237 let info_hash2 = InfoHash::from_str(&hash2).unwrap(); - let _number_of_downloads_increased = in_memory_torrent_repository.upsert_peer(&info_hash2, &sample_peer(), None); + let _number_of_downloads_increased = in_memory_torrent_repository + .upsert_peer(&info_hash2, &sample_peer(), None) + .await; - let torrents = get_torrents_page(&in_memory_torrent_repository, Some(&Pagination::default())); + let torrents = get_torrents_page(&in_memory_torrent_repository, Some(&Pagination::default())).await; assert_eq!( torrents, @@ -403,7 +424,7 @@ async fn it_should_return_an_empty_list_if_none_of_the_requested_torrents_is_found() { let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::default()); - let torrent_info = get_torrents(&in_memory_torrent_repository, &[sample_info_hash()]); + let torrent_info = get_torrents(&in_memory_torrent_repository, &[sample_info_hash()]).await; assert!(torrent_info.is_empty()); } @@ -414,9 +435,11 @@ let info_hash = sample_info_hash(); - let _ = in_memory_torrent_repository.upsert_peer(&info_hash, &sample_peer(), None); + let _ = in_memory_torrent_repository + .upsert_peer(&info_hash, &sample_peer(), None) + .await; - let torrent_info = get_torrents(&in_memory_torrent_repository, &[info_hash]); + let torrent_info = get_torrents(&in_memory_torrent_repository, &[info_hash]).await; assert_eq!( torrent_info, diff --git a/packages/udp-tracker-core/Cargo.toml b/packages/udp-tracker-core/Cargo.toml index 6cf250074..9a27ec826 100644 --- a/packages/udp-tracker-core/Cargo.toml +++ b/packages/udp-tracker-core/Cargo.toml @@ -33,6 +33,7 @@ torrust-tracker-configuration = { version = "3.0.0-develop", path = "../configuration" } torrust-tracker-events = { version = "3.0.0-develop", path = "../events" } torrust-tracker-metrics = { version = "3.0.0-develop", path = "../metrics" }
"../metrics" } torrust-tracker-primitives = { version = "3.0.0-develop", path = "../primitives" } +torrust-tracker-torrent-repository = { version = "3.0.0-develop", path = "../torrent-repository" } tracing = "0" zerocopy = "0.7" diff --git a/packages/udp-tracker-core/benches/helpers/sync.rs b/packages/udp-tracker-core/benches/helpers/sync.rs index 1814a865e..e8ec1ce03 100644 --- a/packages/udp-tracker-core/benches/helpers/sync.rs +++ b/packages/udp-tracker-core/benches/helpers/sync.rs @@ -5,6 +5,7 @@ use std::time::{Duration, Instant}; use bittorrent_udp_tracker_core::event::bus::EventBus; use bittorrent_udp_tracker_core::event::sender::Broadcaster; use bittorrent_udp_tracker_core::services::connect::ConnectService; +use torrust_tracker_events::bus::SenderStatus; use torrust_tracker_primitives::service_binding::{Protocol, ServiceBinding}; use crate::helpers::utils::{sample_ipv4_remote_addr, sample_issue_time}; @@ -16,7 +17,7 @@ pub async fn connect_once(samples: u64) -> Duration { let server_service_binding = ServiceBinding::new(Protocol::UDP, server_socket_addr).unwrap(); let udp_core_broadcaster = Broadcaster::default(); - let event_bus = Arc::new(EventBus::new(false, udp_core_broadcaster.clone())); + let event_bus = Arc::new(EventBus::new(SenderStatus::Disabled, udp_core_broadcaster.clone())); let udp_core_stats_event_sender = event_bus.sender(); let connect_service = Arc::new(ConnectService::new(udp_core_stats_event_sender)); diff --git a/packages/udp-tracker-core/src/container.rs b/packages/udp-tracker-core/src/container.rs index 98c01a703..07a8a09ef 100644 --- a/packages/udp-tracker-core/src/container.rs +++ b/packages/udp-tracker-core/src/container.rs @@ -3,6 +3,7 @@ use std::sync::Arc; use bittorrent_tracker_core::container::TrackerCoreContainer; use tokio::sync::RwLock; use torrust_tracker_configuration::{Core, UdpTracker}; +use torrust_tracker_torrent_repository::container::TorrentRepositoryContainer; use crate::event::bus::EventBus; use crate::event::sender::Broadcaster; @@ -31,7 +32,15 @@ pub struct UdpTrackerCoreContainer { impl UdpTrackerCoreContainer { #[must_use] pub fn initialize(core_config: &Arc, udp_tracker_config: &Arc) -> Arc { - let tracker_core_container = Arc::new(TrackerCoreContainer::initialize(core_config)); + let torrent_repository_container = Arc::new(TorrentRepositoryContainer::initialize( + core_config.tracker_usage_statistics.into(), + )); + + let tracker_core_container = Arc::new(TrackerCoreContainer::initialize_from( + core_config, + &torrent_repository_container, + )); + Self::initialize_from_tracker_core(&tracker_core_container, udp_tracker_config) } @@ -41,6 +50,7 @@ impl UdpTrackerCoreContainer { udp_tracker_config: &Arc, ) -> Arc { let udp_tracker_core_services = UdpTrackerCoreServices::initialize_from(tracker_core_container); + Self::initialize_from_services(tracker_core_container, &udp_tracker_core_services, udp_tracker_config) } @@ -83,7 +93,7 @@ impl UdpTrackerCoreServices { let udp_core_broadcaster = Broadcaster::default(); let udp_core_stats_repository = Arc::new(Repository::new()); let event_bus = Arc::new(EventBus::new( - tracker_core_container.core_config.tracker_usage_statistics, + tracker_core_container.core_config.tracker_usage_statistics.into(), udp_core_broadcaster.clone(), )); diff --git a/packages/udp-tracker-core/src/services/connect.rs b/packages/udp-tracker-core/src/services/connect.rs index 18c9fd0ba..6ba36f274 100644 --- a/packages/udp-tracker-core/src/services/connect.rs +++ b/packages/udp-tracker-core/src/services/connect.rs @@ 
diff --git a/packages/udp-tracker-core/src/services/connect.rs b/packages/udp-tracker-core/src/services/connect.rs index 18c9fd0ba..6ba36f274 100644 --- a/packages/udp-tracker-core/src/services/connect.rs +++ b/packages/udp-tracker-core/src/services/connect.rs @@ -61,6 +61,7 @@ mod tests { use std::sync::Arc; use mockall::predicate::eq; + use torrust_tracker_events::bus::SenderStatus; use torrust_tracker_primitives::service_binding::{Protocol, ServiceBinding}; use crate::connection_cookie::make; @@ -79,7 +80,7 @@ mod tests { let server_service_binding = ServiceBinding::new(Protocol::UDP, server_socket_addr).unwrap(); let udp_core_broadcaster = Broadcaster::default(); - let event_bus = Arc::new(EventBus::new(false, udp_core_broadcaster.clone())); + let event_bus = Arc::new(EventBus::new(SenderStatus::Disabled, udp_core_broadcaster.clone())); let udp_core_stats_event_sender = event_bus.sender(); let connect_service = Arc::new(ConnectService::new(udp_core_stats_event_sender)); @@ -100,7 +101,7 @@ mod tests { let server_service_binding = ServiceBinding::new(Protocol::UDP, server_socket_addr).unwrap(); let udp_core_broadcaster = Broadcaster::default(); - let event_bus = Arc::new(EventBus::new(false, udp_core_broadcaster.clone())); + let event_bus = Arc::new(EventBus::new(SenderStatus::Disabled, udp_core_broadcaster.clone())); let udp_core_stats_event_sender = event_bus.sender(); let connect_service = Arc::new(ConnectService::new(udp_core_stats_event_sender)); @@ -122,7 +123,7 @@ mod tests { let server_service_binding = ServiceBinding::new(Protocol::UDP, server_socket_addr).unwrap(); let udp_core_broadcaster = Broadcaster::default(); - let event_bus = Arc::new(EventBus::new(false, udp_core_broadcaster.clone())); + let event_bus = Arc::new(EventBus::new(SenderStatus::Disabled, udp_core_broadcaster.clone())); let udp_core_stats_event_sender = event_bus.sender(); let connect_service = Arc::new(ConnectService::new(udp_core_stats_event_sender)); diff --git a/packages/udp-tracker-core/src/statistics/services.rs b/packages/udp-tracker-core/src/statistics/services.rs index c76f02040..20ba2ea7f 100644 --- a/packages/udp-tracker-core/src/statistics/services.rs +++ b/packages/udp-tracker-core/src/statistics/services.rs @@ -63,7 +63,7 @@ pub async fn get_metrics( in_memory_torrent_repository: Arc<InMemoryTorrentRepository>, stats_repository: Arc<Repository>, ) -> TrackerMetrics { - let torrents_metrics = in_memory_torrent_repository.get_aggregate_swarm_metadata(); + let torrents_metrics = in_memory_torrent_repository.get_aggregate_swarm_metadata().await; let stats = stats_repository.get_stats().await; TrackerMetrics { diff --git a/packages/udp-tracker-server/Cargo.toml b/packages/udp-tracker-server/Cargo.toml index 4d0296461..a0c129acb 100644 --- a/packages/udp-tracker-server/Cargo.toml +++ b/packages/udp-tracker-server/Cargo.toml @@ -33,6 +33,7 @@ torrust-tracker-events = { version = "3.0.0-develop", path = "../events" } torrust-tracker-located-error = { version = "3.0.0-develop", path = "../located-error" } torrust-tracker-metrics = { version = "3.0.0-develop", path = "../metrics" } torrust-tracker-primitives = { version = "3.0.0-develop", path = "../primitives" } +torrust-tracker-torrent-repository = { version = "3.0.0-develop", path = "../torrent-repository" } tracing = "0" url = { version = "2", features = ["serde"] } uuid = { version = "1", features = ["v4"] } diff --git a/packages/udp-tracker-server/src/container.rs b/packages/udp-tracker-server/src/container.rs index a0bc8f35b..365db4ca7 100644 --- a/packages/udp-tracker-server/src/container.rs +++ b/packages/udp-tracker-server/src/container.rs @@ -39,7 +39,7 @@ impl UdpTrackerServerServices { let udp_server_broadcaster = Broadcaster::default(); let udp_server_stats_repository = Arc::new(Repository::new()); let udp_server_stats_event_bus = Arc::new(EventBus::new( - core_config.tracker_usage_statistics, + core_config.tracker_usage_statistics.into(), udp_server_broadcaster.clone(), ));
diff --git a/packages/udp-tracker-server/src/environment.rs b/packages/udp-tracker-server/src/environment.rs index 962442fde..f92d5dd29 100644 --- a/packages/udp-tracker-server/src/environment.rs +++ b/packages/udp-tracker-server/src/environment.rs @@ -8,6 +8,7 @@ use tokio::task::JoinHandle; use torrust_server_lib::registar::Registar; use torrust_tracker_configuration::{logging, Configuration, DEFAULT_TIMEOUT}; use torrust_tracker_primitives::peer; +use torrust_tracker_torrent_repository::container::TorrentRepositoryContainer; use crate::container::UdpTrackerServerContainer; use crate::server::spawner::Spawner; @@ -33,12 +34,13 @@ where { /// Add a torrent to the tracker #[allow(dead_code)] - pub fn add_torrent(&self, info_hash: &InfoHash, peer: &peer::Peer) { + pub async fn add_torrent(&self, info_hash: &InfoHash, peer: &peer::Peer) { let _number_of_downloads_increased = self .container .tracker_core_container .in_memory_torrent_repository - .upsert_peer(info_hash, peer, None); + .upsert_peer(info_hash, peer, None) + .await; } } @@ -173,9 +175,18 @@ impl EnvContainer { let udp_tracker_configurations = configuration.udp_trackers.clone().expect("missing UDP tracker configuration"); let udp_tracker_config = Arc::new(udp_tracker_configurations[0].clone()); - let tracker_core_container = Arc::new(TrackerCoreContainer::initialize(&core_config)); + let torrent_repository_container = Arc::new(TorrentRepositoryContainer::initialize( + core_config.tracker_usage_statistics.into(), + )); + + let tracker_core_container = Arc::new(TrackerCoreContainer::initialize_from( + &core_config, + &torrent_repository_container, + )); + let udp_tracker_core_container = UdpTrackerCoreContainer::initialize_from_tracker_core(&tracker_core_container, &udp_tracker_config); + let udp_tracker_server_container = UdpTrackerServerContainer::initialize(&core_config); Self { diff --git a/packages/udp-tracker-server/src/handlers/announce.rs b/packages/udp-tracker-server/src/handlers/announce.rs index 5311531aa..65b521f27 100644 --- a/packages/udp-tracker-server/src/handlers/announce.rs +++ b/packages/udp-tracker-server/src/handlers/announce.rs @@ -206,6 +206,7 @@ mod tests { use bittorrent_tracker_core::torrent::repository::in_memory::InMemoryTorrentRepository; use bittorrent_udp_tracker_core::connection_cookie::{gen_remote_fingerprint, make}; use mockall::predicate::eq; + use torrust_tracker_events::bus::SenderStatus; use torrust_tracker_primitives::service_binding::{Protocol, ServiceBinding}; use crate::event::{ConnectionContext, Event, UdpRequestKind}; @@ -254,7 +255,8 @@ let peers = core_tracker_services .in_memory_torrent_repository - .get_torrent_peers(&info_hash.0.into()); + .get_torrent_peers(&info_hash.0.into()) + .await; let expected_peer = TorrentPeerBuilder::new() .with_peer_id(peer_id) @@ -348,12 +350,13 @@ let peers = core_tracker_services .in_memory_torrent_repository - .get_torrent_peers(&info_hash.0.into()); + .get_torrent_peers(&info_hash.0.into()) + .await; assert_eq!(peers[0].peer_addr, SocketAddr::new(IpAddr::V4(remote_client_ip), client_port)); } - fn add_a_torrent_peer_using_ipv6(in_memory_torrent_repository: &Arc<InMemoryTorrentRepository>) { + async fn add_a_torrent_peer_using_ipv6(in_memory_torrent_repository: &Arc<InMemoryTorrentRepository>) { let info_hash = AquaticInfoHash([0u8; 20]); let client_ip_v4 = Ipv4Addr::new(126, 0, 0, 1);
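The helpers in this test module go async because `upsert_peer` now is; its `bool` result keeps the meaning suggested by the `_number_of_downloads_increased` bindings throughout these tests. A usage sketch under those assumptions:

```rust
use std::sync::Arc;

use bittorrent_primitives::info_hash::InfoHash;
use bittorrent_tracker_core::torrent::repository::in_memory::InMemoryTorrentRepository;
use torrust_tracker_primitives::peer;

// Usage sketch: `upsert_peer` is awaited and returns whether the
// completed-downloads counter increased for this torrent.
async fn announce(repo: &Arc<InMemoryTorrentRepository>, info_hash: &InfoHash, peer: &peer::Peer) {
    let downloads_increased = repo.upsert_peer(info_hash, peer, None).await;

    if downloads_increased {
        // e.g. persist the new completed count for this torrent
    }
}
```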
@@ -366,8 +369,9 @@ .with_peer_address(SocketAddr::new(IpAddr::V6(client_ip_v6), client_port)) .into(); - let _number_of_downloads_increased = - in_memory_torrent_repository.upsert_peer(&info_hash.0.into(), &peer_using_ipv6, None); + let _number_of_downloads_increased = in_memory_torrent_repository + .upsert_peer(&info_hash.0.into(), &peer_using_ipv6, None) + .await; } async fn announce_a_new_peer_using_ipv4( @@ -375,7 +379,10 @@ core_udp_tracker_services: Arc<CoreUdpTrackerServices>, ) -> Response { let udp_server_broadcaster = crate::event::sender::Broadcaster::default(); - let event_bus = Arc::new(crate::event::bus::EventBus::new(false, udp_server_broadcaster.clone())); + let event_bus = Arc::new(crate::event::bus::EventBus::new( + SenderStatus::Disabled, + udp_server_broadcaster.clone(), + )); let udp_server_stats_event_sender = event_bus.sender(); @@ -405,7 +412,7 @@ let (core_tracker_services, core_udp_tracker_services, _server_udp_tracker_services) = initialize_core_tracker_services_for_public_tracker(); - add_a_torrent_peer_using_ipv6(&core_tracker_services.in_memory_torrent_repository); + add_a_torrent_peer_using_ipv6(&core_tracker_services.in_memory_torrent_repository).await; let response = announce_a_new_peer_using_ipv4(Arc::new(core_tracker_services), Arc::new(core_udp_tracker_services)).await; @@ -504,7 +511,8 @@ let peers = core_tracker_services .in_memory_torrent_repository - .get_torrent_peers(&info_hash.0.into()); + .get_torrent_peers(&info_hash.0.into()) + .await; let external_ip_in_tracker_configuration = core_tracker_services.core_config.net.external_ip.unwrap(); @@ -538,6 +546,7 @@ use bittorrent_udp_tracker_core::services::announce::AnnounceService; use mockall::predicate::eq; use torrust_tracker_configuration::Core; + use torrust_tracker_events::bus::SenderStatus; use torrust_tracker_primitives::service_binding::{Protocol, ServiceBinding}; use crate::event::{ConnectionContext, Event, UdpRequestKind}; @@ -586,7 +595,8 @@ let peers = core_tracker_services .in_memory_torrent_repository - .get_torrent_peers(&info_hash.0.into()); + .get_torrent_peers(&info_hash.0.into()) + .await; let expected_peer = TorrentPeerBuilder::new() .with_peer_id(peer_id) @@ -683,13 +693,14 @@ let peers = core_tracker_services .in_memory_torrent_repository - .get_torrent_peers(&info_hash.0.into()); + .get_torrent_peers(&info_hash.0.into()) + .await; // When using IPv6 the tracker converts the remote client ip into an IPv4 address assert_eq!(peers[0].peer_addr, SocketAddr::new(IpAddr::V6(remote_client_ip), client_port)); } - fn add_a_torrent_peer_using_ipv4(in_memory_torrent_repository: &Arc<InMemoryTorrentRepository>) { + async fn add_a_torrent_peer_using_ipv4(in_memory_torrent_repository: &Arc<InMemoryTorrentRepository>) { let info_hash = AquaticInfoHash([0u8; 20]); let client_ip_v4 = Ipv4Addr::new(126, 0, 0, 1); @@ -701,8 +712,9 @@ .with_peer_address(SocketAddr::new(IpAddr::V4(client_ip_v4), client_port)) .into(); - let _number_of_downloads_increased = - in_memory_torrent_repository.upsert_peer(&info_hash.0.into(), &peer_using_ipv4, None); + let _number_of_downloads_increased = in_memory_torrent_repository + .upsert_peer(&info_hash.0.into(), &peer_using_ipv4, None) + .await; } async fn announce_a_new_peer_using_ipv6( @@ -711,11 +723,14 @@ whitelist_authorization: Arc<WhitelistAuthorization>, ) -> Response { let udp_core_broadcaster = Broadcaster::default(); - let core_event_bus = Arc::new(EventBus::new(false, udp_core_broadcaster.clone())); + let core_event_bus = Arc::new(EventBus::new(SenderStatus::Disabled,
udp_core_broadcaster.clone())); let udp_core_stats_event_sender = core_event_bus.sender(); let udp_server_broadcaster = crate::event::sender::Broadcaster::default(); - let server_event_bus = Arc::new(crate::event::bus::EventBus::new(false, udp_server_broadcaster.clone())); + let server_event_bus = Arc::new(crate::event::bus::EventBus::new( + SenderStatus::Disabled, + udp_server_broadcaster.clone(), + )); let udp_server_stats_event_sender = server_event_bus.sender(); @@ -755,7 +770,7 @@ mod tests { let (core_tracker_services, _core_udp_tracker_services, _server_udp_tracker_services) = initialize_core_tracker_services_for_public_tracker(); - add_a_torrent_peer_using_ipv4(&core_tracker_services.in_memory_torrent_repository); + add_a_torrent_peer_using_ipv4(&core_tracker_services.in_memory_torrent_repository).await; let response = announce_a_new_peer_using_ipv6( core_tracker_services.core_config.clone(), @@ -938,7 +953,7 @@ mod tests { .await .unwrap(); - let peers = in_memory_torrent_repository.get_torrent_peers(&info_hash.0.into()); + let peers = in_memory_torrent_repository.get_torrent_peers(&info_hash.0.into()).await; let external_ip_in_tracker_configuration = core_config.net.external_ip.unwrap(); diff --git a/packages/udp-tracker-server/src/handlers/connect.rs b/packages/udp-tracker-server/src/handlers/connect.rs index 1244a6a3b..961189945 100644 --- a/packages/udp-tracker-server/src/handlers/connect.rs +++ b/packages/udp-tracker-server/src/handlers/connect.rs @@ -63,6 +63,7 @@ mod tests { use bittorrent_udp_tracker_core::event::sender::Broadcaster; use bittorrent_udp_tracker_core::services::connect::ConnectService; use mockall::predicate::eq; + use torrust_tracker_events::bus::SenderStatus; use torrust_tracker_primitives::service_binding::{Protocol, ServiceBinding}; use crate::event::{ConnectionContext, Event, UdpRequestKind}; @@ -84,11 +85,14 @@ mod tests { let server_service_binding = ServiceBinding::new(Protocol::UDP, server_socket_addr).unwrap(); let udp_core_broadcaster = Broadcaster::default(); - let core_event_bus = Arc::new(EventBus::new(false, udp_core_broadcaster.clone())); + let core_event_bus = Arc::new(EventBus::new(SenderStatus::Disabled, udp_core_broadcaster.clone())); let udp_core_stats_event_sender = core_event_bus.sender(); let udp_server_broadcaster = crate::event::sender::Broadcaster::default(); - let server_event_bus = Arc::new(crate::event::bus::EventBus::new(false, udp_server_broadcaster.clone())); + let server_event_bus = Arc::new(crate::event::bus::EventBus::new( + SenderStatus::Disabled, + udp_server_broadcaster.clone(), + )); let udp_server_stats_event_sender = server_event_bus.sender(); @@ -123,11 +127,14 @@ mod tests { let server_service_binding = ServiceBinding::new(Protocol::UDP, server_socket_addr).unwrap(); let udp_core_broadcaster = Broadcaster::default(); - let core_event_bus = Arc::new(EventBus::new(false, udp_core_broadcaster.clone())); + let core_event_bus = Arc::new(EventBus::new(SenderStatus::Disabled, udp_core_broadcaster.clone())); let udp_core_stats_event_sender = core_event_bus.sender(); let udp_server_broadcaster = crate::event::sender::Broadcaster::default(); - let server_event_bus = Arc::new(crate::event::bus::EventBus::new(false, udp_server_broadcaster.clone())); + let server_event_bus = Arc::new(crate::event::bus::EventBus::new( + SenderStatus::Disabled, + udp_server_broadcaster.clone(), + )); let udp_server_stats_event_sender = server_event_bus.sender(); @@ -162,12 +169,15 @@ mod tests { let server_service_binding = 
ServiceBinding::new(Protocol::UDP, server_socket_addr).unwrap(); let udp_core_broadcaster = Broadcaster::default(); - let core_event_bus = Arc::new(EventBus::new(false, udp_core_broadcaster.clone())); + let core_event_bus = Arc::new(EventBus::new(SenderStatus::Disabled, udp_core_broadcaster.clone())); let udp_core_stats_event_sender = core_event_bus.sender(); let udp_server_broadcaster = crate::event::sender::Broadcaster::default(); - let server_event_bus = Arc::new(crate::event::bus::EventBus::new(false, udp_server_broadcaster.clone())); + let server_event_bus = Arc::new(crate::event::bus::EventBus::new( + SenderStatus::Disabled, + udp_server_broadcaster.clone(), + )); let udp_server_stats_event_sender = server_event_bus.sender(); diff --git a/packages/udp-tracker-server/src/handlers/mod.rs b/packages/udp-tracker-server/src/handlers/mod.rs index d39ad0972..ca834c006 100644 --- a/packages/udp-tracker-server/src/handlers/mod.rs +++ b/packages/udp-tracker-server/src/handlers/mod.rs @@ -227,6 +227,7 @@ pub(crate) mod tests { use mockall::mock; use torrust_tracker_clock::clock::Time; use torrust_tracker_configuration::{Configuration, Core}; + use torrust_tracker_events::bus::SenderStatus; use torrust_tracker_events::sender::SendError; use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch}; use torrust_tracker_test_helpers::configuration; @@ -287,11 +288,14 @@ pub(crate) mod tests { let scrape_handler = Arc::new(ScrapeHandler::new(&whitelist_authorization, &in_memory_torrent_repository)); let udp_core_broadcaster = Broadcaster::default(); - let core_event_bus = Arc::new(EventBus::new(false, udp_core_broadcaster.clone())); + let core_event_bus = Arc::new(EventBus::new(SenderStatus::Disabled, udp_core_broadcaster.clone())); let udp_core_stats_event_sender = core_event_bus.sender(); let udp_server_broadcaster = crate::event::sender::Broadcaster::default(); - let server_event_bus = Arc::new(crate::event::bus::EventBus::new(false, udp_server_broadcaster.clone())); + let server_event_bus = Arc::new(crate::event::bus::EventBus::new( + SenderStatus::Disabled, + udp_server_broadcaster.clone(), + )); let udp_server_stats_event_sender = server_event_bus.sender(); diff --git a/packages/udp-tracker-server/src/handlers/scrape.rs b/packages/udp-tracker-server/src/handlers/scrape.rs index 5cc84acd6..e35e118b4 100644 --- a/packages/udp-tracker-server/src/handlers/scrape.rs +++ b/packages/udp-tracker-server/src/handlers/scrape.rs @@ -92,6 +92,7 @@ mod tests { }; use bittorrent_tracker_core::torrent::repository::in_memory::InMemoryTorrentRepository; use bittorrent_udp_tracker_core::connection_cookie::{gen_remote_fingerprint, make}; + use torrust_tracker_events::bus::SenderStatus; use torrust_tracker_primitives::service_binding::{Protocol, ServiceBinding}; use crate::event::bus::EventBus; @@ -163,7 +164,9 @@ .with_number_of_bytes_left(0) .into(); - let _number_of_downloads_increased = in_memory_torrent_repository.upsert_peer(&info_hash.0.into(), &peer, None); + let _number_of_downloads_increased = in_memory_torrent_repository + .upsert_peer(&info_hash.0.into(), &peer, None) + .await; } fn build_scrape_request(remote_addr: &SocketAddr, info_hash: &InfoHash) -> ScrapeRequest { @@ -181,7 +184,7 @@ core_udp_tracker_services: Arc<CoreUdpTrackerServices>, ) -> Response { let udp_server_broadcaster = Broadcaster::default(); - let event_bus = Arc::new(EventBus::new(false, udp_server_broadcaster.clone())); + let event_bus = Arc::new(EventBus::new(SenderStatus::Disabled, udp_server_broadcaster.clone())); let udp_server_stats_event_sender = event_bus.sender();
diff --git a/packages/udp-tracker-server/src/statistics/services.rs b/packages/udp-tracker-server/src/statistics/services.rs index a2215067b..c8b24a744 100644 --- a/packages/udp-tracker-server/src/statistics/services.rs +++ b/packages/udp-tracker-server/src/statistics/services.rs @@ -66,7 +66,7 @@ pub async fn get_metrics( ban_service: Arc<RwLock<BanService>>, stats_repository: Arc<Repository>, ) -> TrackerMetrics { - let torrents_metrics = in_memory_torrent_repository.get_aggregate_swarm_metadata(); + let torrents_metrics = in_memory_torrent_repository.get_aggregate_swarm_metadata().await; let stats = stats_repository.get_stats().await; let udp_banned_ips_total = ban_service.read().await.get_banned_ips_total(); diff --git a/share/default/config/tracker.development.sqlite3.toml b/share/default/config/tracker.development.sqlite3.toml index 488743eb9..89d700132 100644 --- a/share/default/config/tracker.development.sqlite3.toml +++ b/share/default/config/tracker.development.sqlite3.toml @@ -7,14 +7,14 @@ schema_version = "2.0.0" threshold = "info" [core] -#inactive_peer_cleanup_interval = 60 +inactive_peer_cleanup_interval = 60 listed = false private = false [core.tracker_policy] -#max_peer_timeout = 30 +max_peer_timeout = 30 persistent_torrent_completed_stat = true -#remove_peerless_torrents = true +remove_peerless_torrents = true [[udp_trackers]] bind_address = "0.0.0.0:6868" diff --git a/src/app.rs b/src/app.rs index 93035ee99..ca8b7a5c3 100644 --- a/src/app.rs +++ b/src/app.rs @@ -72,9 +72,11 @@ async fn load_data_from_database(config: &Configuration, app_container: &Arc<AppContainer>) -> JobManager { let mut job_manager = JobManager::new(); + start_torrent_repository_event_listener(config, app_container, &mut job_manager); start_http_core_event_listener(config, app_container, &mut job_manager); start_udp_core_event_listener(config, app_container, &mut job_manager); start_udp_server_event_listener(config, app_container, &mut job_manager); + start_the_udp_instances(config, app_container, &mut job_manager).await; start_the_http_instances(config, app_container, &mut job_manager).await; start_the_http_api(config, app_container, &mut job_manager).await; @@ -126,6 +128,18 @@ fn load_torrents_from_database(config: &Configuration, app_container: &Arc<AppContainer>) { +fn start_torrent_repository_event_listener( + config: &Configuration, + app_container: &Arc<AppContainer>, + job_manager: &mut JobManager, +) { + let opt_handle = jobs::torrent_repository::start_event_listener(config, app_container); + + if let Some(handle) = opt_handle { + job_manager.push("torrent_repository_event_listener", handle); + } +} + fn start_http_core_event_listener(config: &Configuration, app_container: &Arc<AppContainer>, job_manager: &mut JobManager) { diff --git a/src/bootstrap/jobs/mod.rs b/src/bootstrap/jobs/mod.rs index 2e3d798ad..b311c6da6 100644 --- a/src/bootstrap/jobs/mod.rs +++ b/src/bootstrap/jobs/mod.rs @@ -11,6 +11,7 @@ pub mod http_tracker; pub mod http_tracker_core; pub mod manager; pub mod torrent_cleanup; +pub mod torrent_repository; pub mod tracker_apis; pub mod udp_tracker; pub mod udp_tracker_core; diff --git a/src/bootstrap/jobs/torrent_cleanup.rs b/src/bootstrap/jobs/torrent_cleanup.rs index 0107b5370..8a3a71a44 100644 --- a/src/bootstrap/jobs/torrent_cleanup.rs +++ b/src/bootstrap/jobs/torrent_cleanup.rs @@ -45,7 +45,7 @@ pub fn start_job(config: &Core, torrents_manager: &Arc<TorrentsManager>) -> Join if let Some(torrents_manager) = weak_torrents_manager.upgrade() { let start_time = Utc::now().time(); tracing::info!("Cleaning up torrents (executed every {} secs) ...", interval_in_secs); - torrents_manager.cleanup_torrents(); + torrents_manager.cleanup_torrents().await; tracing::info!("Cleaned up torrents in: {} ms", (Utc::now().time() - start_time).num_milliseconds()); } else { break;
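The `.await` added above sits inside the periodic job spawned by `start_job`, which is only partially visible in this hunk. The pattern, sketched with a stand-in type (`Manager` is not the real `TorrentsManager`; the interval and weak-upgrade structure follow the context lines shown):

```rust
use std::sync::Arc;
use std::time::Duration;

use tokio::task::JoinHandle;

// Stand-in for the real TorrentsManager, just to make the sketch self-contained.
struct Manager;
impl Manager {
    async fn cleanup_torrents(&self) { /* ... */ }
}

fn start_cleanup_job(interval_in_secs: u64, manager: &Arc<Manager>) -> JoinHandle<()> {
    // Hold a Weak reference so the job does not keep the manager alive.
    let weak_manager = Arc::downgrade(manager);

    tokio::spawn(async move {
        let mut interval = tokio::time::interval(Duration::from_secs(interval_in_secs));
        loop {
            interval.tick().await;
            if let Some(manager) = weak_manager.upgrade() {
                // `cleanup_torrents` is async now, hence the `.await` added above.
                manager.cleanup_torrents().await;
            } else {
                break; // owner dropped; stop the periodic job
            }
        }
    })
}
```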
diff --git a/src/bootstrap/jobs/torrent_repository.rs b/src/bootstrap/jobs/torrent_repository.rs new file mode 100644 index 000000000..2125de554 --- /dev/null +++ b/src/bootstrap/jobs/torrent_repository.rs @@ -0,0 +1,20 @@ +use std::sync::Arc; + +use tokio::task::JoinHandle; +use torrust_tracker_configuration::Configuration; + +use crate::container::AppContainer; + +pub fn start_event_listener(config: &Configuration, app_container: &Arc<AppContainer>) -> Option<JoinHandle<()>> { + if config.core.tracker_usage_statistics { + let job = torrust_tracker_torrent_repository::statistics::event::listener::run_event_listener( + app_container.torrent_repository_container.event_bus.receiver(), + &app_container.torrent_repository_container.stats_repository, + ); + + Some(job) + } else { + tracing::info!("Torrent repository event listener job is disabled."); + None + } +} diff --git a/src/container.rs b/src/container.rs index 93f1fb4d7..98c455780 100644 --- a/src/container.rs +++ b/src/container.rs @@ -9,6 +9,7 @@ use bittorrent_udp_tracker_core::{self}; use torrust_rest_tracker_api_core::container::TrackerHttpApiCoreContainer; use torrust_server_lib::registar::Registar; use torrust_tracker_configuration::{Configuration, HttpApi}; +use torrust_tracker_torrent_repository::container::TorrentRepositoryContainer; use torrust_udp_tracker_server::container::UdpTrackerServerContainer; use tracing::instrument; @@ -28,6 +29,9 @@ pub struct AppContainer { // Registar pub registar: Arc<Registar>, + // Torrent Repository + pub torrent_repository_container: Arc<TorrentRepositoryContainer>, + // Core pub tracker_core_container: Arc<TrackerCoreContainer>, @@ -54,9 +58,18 @@ let registar = Arc::new(Registar::default()); + // Torrent Repository + + let torrent_repository_container = Arc::new(TorrentRepositoryContainer::initialize( + core_config.tracker_usage_statistics.into(), + )); + // Core - let tracker_core_container = Arc::new(TrackerCoreContainer::initialize(&core_config)); + let tracker_core_container = Arc::new(TrackerCoreContainer::initialize_from( + &core_config, + &torrent_repository_container, + )); // HTTP @@ -84,6 +97,9 @@ // Registar registar, + // Torrent Repository + torrent_repository_container, + // Core tracker_core_container, @@ -128,10 +144,15 @@ #[must_use] pub fn tracker_http_api_container(&self, http_api_config: &Arc<HttpApi>) -> Arc<TrackerHttpApiCoreContainer> { TrackerHttpApiCoreContainer { - tracker_core_container: self.tracker_core_container.clone(), http_api_config: http_api_config.clone(), - ban_service: self.udp_tracker_core_services.ban_service.clone(), + + torrent_repository_container: self.torrent_repository_container.clone(), + + tracker_core_container: self.tracker_core_container.clone(), + http_stats_repository: self.http_tracker_core_services.stats_repository.clone(), + + ban_service: self.udp_tracker_core_services.ban_service.clone(), udp_core_stats_repository: self.udp_tracker_core_services.stats_repository.clone(), udp_server_stats_repository: self.udp_tracker_server_container.stats_repository.clone(), }