diff --git a/Cargo.lock b/Cargo.lock index 2b61633c2..f9d1aca74 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -91,6 +91,12 @@ dependencies = [ "windows-sys 0.59.0", ] +[[package]] +name = "anyhow" +version = "1.0.100" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a23eb6b1614318a8071c9b2521f36b424b2c83db5eb3a0fead4a6c0809af6e61" + [[package]] name = "async-attributes" version = "1.1.2" @@ -176,7 +182,7 @@ dependencies = [ "futures-lite", "parking", "polling", - "rustix", + "rustix 0.38.37", "slab", "tracing", "windows-sys 0.59.0", @@ -219,7 +225,7 @@ dependencies = [ "cfg-if", "event-listener 5.3.1", "futures-lite", - "rustix", + "rustix 0.38.37", "tracing", ] @@ -235,7 +241,7 @@ dependencies = [ "cfg-if", "futures-core", "futures-io", - "rustix", + "rustix 0.38.37", "signal-hook-registry", "slab", "windows-sys 0.59.0", @@ -268,12 +274,45 @@ dependencies = [ "wasm-bindgen-futures", ] +[[package]] +name = "async-stream" +version = "0.3.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0b5a71a6f37880a80d1d7f19efd781e4b5de42c88f0722cc13bcb6cc2cfe8476" +dependencies = [ + "async-stream-impl", + "futures-core", + "pin-project-lite", +] + +[[package]] +name = "async-stream-impl" +version = "0.3.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c7c24de15d275a1ecfd47a380fb4d5ec9bfe0933f309ed5e705b775596a3574d" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.101", +] + [[package]] name = "async-task" version = "4.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8b75356056920673b02621b35afd0f7dda9306d03c79a30f5c56c44cf256e3de" +[[package]] +name = "async-trait" +version = "0.1.89" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9035ad2d096bed7955a320ee7e2230574d28fd3c3a0f186cbea1ff3c7eed5dbb" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.101", +] + [[package]] name = "atomic-waker" version = "1.1.2" @@ -286,6 +325,49 @@ version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0c4b4d0bd25bd0b74681c0ad21497610ce1b7c91b1022cd21c80c6fbdd9476b0" +[[package]] +name = "axum" +version = "0.8.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8a18ed336352031311f4e0b4dd2ff392d4fbb370777c9d18d7fc9d7359f73871" +dependencies = [ + "axum-core", + "bytes", + "futures-util", + "http", + "http-body", + "http-body-util", + "itoa", + "matchit", + "memchr", + "mime", + "percent-encoding", + "pin-project-lite", + "serde_core", + "sync_wrapper", + "tower", + "tower-layer", + "tower-service", +] + +[[package]] +name = "axum-core" +version = "0.5.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "59446ce19cd142f8833f856eb31f3eb097812d1479ab224f54d72428ca21ea22" +dependencies = [ + "bytes", + "futures-core", + "http", + "http-body", + "http-body-util", + "mime", + "pin-project-lite", + "sync_wrapper", + "tower-layer", + "tower-service", +] + [[package]] name = "backon" version = "1.5.0" @@ -307,7 +389,7 @@ dependencies = [ "miniz_oxide", "object", "rustc-demangle", - "windows-targets", + "windows-targets 0.52.6", ] [[package]] @@ -316,13 +398,19 @@ version = "0.21.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9d297deb1925b89f2ccc13d7635fa0714f12c87adce1c75356b39ca9b7178567" +[[package]] +name = "base64" +version = "0.22.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"72b3254f16251a8381aa12e40e3c4d2f0199f8c6508fbecb9d91f575e0fbb8c6" + [[package]] name = "bindgen" version = "0.71.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5f58bf3d7db68cfbac37cfc485a8d711e87e064c3d0fe0435b92f7a407f9d6b3" dependencies = [ - "bitflags", + "bitflags 2.6.0", "cexpr", "clang-sys", "itertools", @@ -334,6 +422,12 @@ dependencies = [ "syn 2.0.101", ] +[[package]] +name = "bitflags" +version = "1.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" + [[package]] name = "bitflags" version = "2.6.0" @@ -353,6 +447,82 @@ dependencies = [ "piper", ] +[[package]] +name = "bollard" +version = "0.19.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "899ca34eb6924d6ec2a77c6f7f5c7339e60fd68235eaf91edd5a15f12958bb06" +dependencies = [ + "async-stream", + "base64 0.22.1", + "bitflags 2.6.0", + "bollard-buildkit-proto", + "bollard-stubs", + "bytes", + "chrono", + "futures-core", + "futures-util", + "hex", + "home", + "http", + "http-body-util", + "hyper", + "hyper-named-pipe", + "hyper-rustls", + "hyper-util", + "hyperlocal", + "log", + "num", + "pin-project-lite", + "rand", + "rustls", + "rustls-native-certs", + "rustls-pemfile", + "rustls-pki-types", + "serde", + "serde_derive", + "serde_json", + "serde_repr", + "serde_urlencoded", + "thiserror", + "tokio", + "tokio-stream", + "tokio-util", + "tonic", + "tower-service", + "url", + "winapi", +] + +[[package]] +name = "bollard-buildkit-proto" +version = "0.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "40b3e79f8bd0f25f32660e3402afca46fd91bebaf135af017326d905651f8107" +dependencies = [ + "prost", + "prost-types", + "tonic", + "ureq", +] + +[[package]] +name = "bollard-stubs" +version = "1.48.3-rc.28.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "64ea257e555d16a2c01e5593f40b73865cdf12efbceda33c6d14a2d8d1490368" +dependencies = [ + "base64 0.22.1", + "bollard-buildkit-proto", + "bytes", + "chrono", + "prost", + "serde", + "serde_json", + "serde_repr", + "serde_with", +] + [[package]] name = "bumpalo" version = "3.16.0" @@ -365,12 +535,19 @@ version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1fd0f2584146f6f2ef48085050886acf353beff7305ebd1ae69500e27c67f64b" +[[package]] +name = "bytes" +version = "1.10.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d71b6127be86fdcfddb610f7182ac57211d4b18a3e9c82eb2d17662f2227ad6a" + [[package]] name = "cc" -version = "1.1.21" +version = "1.2.39" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "07b1695e2c7e8fc85310cde85aeaab7e3097f593c91d209d3f9df76c928100f0" +checksum = "e1354349954c6fc9cb0deab020f27f783cf0b604e8bb754dc4658ecf0d29c35f" dependencies = [ + "find-msvc-tools", "jobserver", "libc", "shlex", @@ -401,8 +578,9 @@ dependencies = [ "iana-time-zone", "js-sys", "num-traits", + "serde", "wasm-bindgen", - "windows-link", + "windows-link 0.1.1", ] [[package]] @@ -467,6 +645,16 @@ dependencies = [ "crossbeam-utils", ] +[[package]] +name = "core-foundation" +version = "0.10.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b2a6cd9ae233e7f62ba4e9353e81a88df7fc8a5987b8d445b4d90c879bd156f6" +dependencies = [ + "core-foundation-sys", + "libc", +] + [[package]] name = "core-foundation-sys" version = "0.8.7" @@ -512,6 +700,73 @@ dependencies = [ 
"windows-sys 0.52.0", ] +[[package]] +name = "darling" +version = "0.21.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9cdf337090841a411e2a7f3deb9187445851f91b309c0c0a29e05f74a00a48c0" +dependencies = [ + "darling_core", + "darling_macro", +] + +[[package]] +name = "darling_core" +version = "0.21.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1247195ecd7e3c85f83c8d2a366e4210d588e802133e1e355180a9870b517ea4" +dependencies = [ + "fnv", + "ident_case", + "proc-macro2", + "quote", + "strsim", + "syn 2.0.101", +] + +[[package]] +name = "darling_macro" +version = "0.21.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d38308df82d1080de0afee5d069fa14b0326a88c14f15c5ccda35b4a6c414c81" +dependencies = [ + "darling_core", + "quote", + "syn 2.0.101", +] + +[[package]] +name = "deranged" +version = "0.5.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a41953f86f8a05768a6cda24def994fd2f424b04ec5c719cf89989779f199071" +dependencies = [ + "powerfmt", + "serde_core", +] + +[[package]] +name = "displaydoc" +version = "0.2.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "97369cbbc041bc366949bc74d34658d6cda5621039731c6310521892a3a20ae0" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.101", +] + +[[package]] +name = "docker_credential" +version = "1.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1d89dfcba45b4afad7450a99b39e751590463e45c04728cf555d36bb66940de8" +dependencies = [ + "base64 0.21.7", + "serde", + "serde_json", +] + [[package]] name = "duct" version = "0.13.7" @@ -524,6 +779,12 @@ dependencies = [ "shared_child", ] +[[package]] +name = "dyn-clone" +version = "1.0.20" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d0881ea181b1df73ff77ffaaf9c7544ecc11e82fba9b5f27b262a3c73a332555" + [[package]] name = "either" version = "1.15.0" @@ -561,12 +822,23 @@ checksum = "5443807d6dff69373d433ab9ef5378ad8df50ca6298caf15de6e52e24aaf54d5" [[package]] name = "errno" -version = "0.3.9" +version = "0.3.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "534c5cf6194dfab3db3242765c03bbe257cf92f22b38f6bc0c58d59108a820ba" +checksum = "39cab71617ae0d63f51a36d69f866391735b51691dbda63cf6f96d042b63efeb" dependencies = [ "libc", - "windows-sys 0.52.0", + "windows-sys 0.59.0", +] + +[[package]] +name = "etcetera" +version = "0.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "26c7b13d0780cb82722fd59f6f57f925e143427e4a75313a6c77243bf5326ae6" +dependencies = [ + "cfg-if", + "home", + "windows-sys 0.59.0", ] [[package]] @@ -602,6 +874,24 @@ version = "2.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e8c02a5121d4ea3eb16a80748c74f5549a5665e4c21333c6098f283870fbdea6" +[[package]] +name = "filetime" +version = "0.2.26" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bc0505cd1b6fa6580283f6bdf70a73fcf4aba1184038c90902b92b3dd0df63ed" +dependencies = [ + "cfg-if", + "libc", + "libredox", + "windows-sys 0.60.2", +] + +[[package]] +name = "find-msvc-tools" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1ced73b1dacfc750a6db6c0a0c3a3853c8b41997e2e2c563dc90804ae6867959" + [[package]] name = "flate2" version = "1.0.33" @@ -612,6 +902,21 @@ dependencies = [ "miniz_oxide", ] +[[package]] +name = "fnv" +version = "1.0.7" 
+source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1" + +[[package]] +name = "form_urlencoded" +version = "1.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cb4cb245038516f5f85277875cdaa4f7d2c9a0fa0468de06ed190163b1581fcf" +dependencies = [ + "percent-encoding", +] + [[package]] name = "futures" version = "0.3.31" @@ -714,6 +1019,17 @@ dependencies = [ "slab", ] +[[package]] +name = "getrandom" +version = "0.2.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "335ff9f135e4384c8150d6f27c6daed433577f86b4750418338c01a1a2528592" +dependencies = [ + "cfg-if", + "libc", + "wasi 0.11.1+wasi-snapshot-preview1", +] + [[package]] name = "getrandom" version = "0.3.3" @@ -723,7 +1039,7 @@ dependencies = [ "cfg-if", "libc", "r-efi", - "wasi", + "wasi 0.14.2+wasi-0.2.4", ] [[package]] @@ -750,6 +1066,31 @@ dependencies = [ "wasm-bindgen", ] +[[package]] +name = "h2" +version = "0.4.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f3c0b69cfcb4e1b9f1bf2f53f95f766e4661169728ec61cd3fe5a0166f2d1386" +dependencies = [ + "atomic-waker", + "bytes", + "fnv", + "futures-core", + "futures-sink", + "http", + "indexmap 2.5.0", + "slab", + "tokio", + "tokio-util", + "tracing", +] + +[[package]] +name = "hashbrown" +version = "0.12.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8a9ee70c43aaf417c914396645a0fa852624801b24ebb7ae78fe8272889ac888" + [[package]] name = "hashbrown" version = "0.14.5" @@ -762,7 +1103,7 @@ version = "7.5.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "765c9198f173dd59ce26ff9f95ef0aafd0a0fe01fb9d72841bc5066a4c06511d" dependencies = [ - "base64", + "base64 0.21.7", "byteorder", "crossbeam-channel", "flate2", @@ -777,94 +1118,383 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fbf6a919d6cf397374f7dfeeea91d974c7c0a7221d0d0f4f20d859d329e53fcc" [[package]] -name = "iana-time-zone" -version = "0.1.61" +name = "hex" +version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "235e081f3925a06703c2d0117ea8b91f042756fd6e7a6e5d901e8ca1a996b220" -dependencies = [ - "android_system_properties", - "core-foundation-sys", - "iana-time-zone-haiku", - "js-sys", - "wasm-bindgen", - "windows-core", -] +checksum = "7f24254aa9a54b5c858eaee2f5bccdb46aaf0e486a595ed5fd8f86ba55232a70" [[package]] -name = "iana-time-zone-haiku" -version = "0.1.2" +name = "home" +version = "0.5.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f31827a206f56af32e590ba56d5d2d085f558508192593743f16b2306495269f" +checksum = "589533453244b0995c858700322199b2becb13b627df2851f64a2775d024abcf" dependencies = [ - "cc", + "windows-sys 0.59.0", ] [[package]] -name = "indexmap" -version = "2.5.0" +name = "http" +version = "1.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "68b900aa2f7301e21c36462b170ee99994de34dff39a4a6a528e80e7376d07e5" +checksum = "f4a85d31aea989eead29a3aaf9e1115a180df8282431156e533de47660892565" dependencies = [ - "equivalent", - "hashbrown", + "bytes", + "fnv", + "itoa", ] [[package]] -name = "is_terminal_polyfill" -version = "1.70.1" +name = "http-body" +version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7943c866cc5cd64cbc25b2e01621d07fa8eb2a1a23160ee81ce38704e97b8ecf" +checksum = 
"1efedce1fb8e6913f23e0c92de8e62cd5b772a67e7b3946df930a62566c93184" +dependencies = [ + "bytes", + "http", +] [[package]] -name = "itertools" -version = "0.13.0" +name = "http-body-util" +version = "0.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "413ee7dfc52ee1a4949ceeb7dbc8a33f2d6c088194d9f922fb8318faf1f01186" +checksum = "b021d93e26becf5dc7e1b75b1bed1fd93124b374ceb73f43d4d4eafec896a64a" dependencies = [ - "either", + "bytes", + "futures-core", + "http", + "http-body", + "pin-project-lite", ] [[package]] -name = "itoa" -version = "1.0.11" +name = "httparse" +version = "1.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "49f1f14873335454500d59611f1cf4a4b0f786f9ac11f4312a78e4cf2566695b" +checksum = "6dbf3de79e51f3d586ab4cb9d5c3e2c14aa28ed23d180cf89b4df0454a69cc87" [[package]] -name = "jiff" -version = "0.2.14" +name = "httpdate" +version = "1.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a194df1107f33c79f4f93d02c80798520551949d59dfad22b6157048a88cca93" -dependencies = [ - "jiff-static", - "log", - "portable-atomic", - "portable-atomic-util", - "serde", -] +checksum = "df3b46402a9d5adb4c86a0cf463f42e19994e3ee891101b1841f30a545cb49a9" [[package]] -name = "jiff-static" -version = "0.2.14" +name = "hyper" +version = "1.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6c6e1db7ed32c6c71b759497fae34bf7933636f75a251b9e736555da426f6442" +checksum = "eb3aa54a13a0dfe7fbe3a59e0c76093041720fdc77b110cc0fc260fafb4dc51e" dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.101", + "atomic-waker", + "bytes", + "futures-channel", + "futures-core", + "h2", + "http", + "http-body", + "httparse", + "httpdate", + "itoa", + "pin-project-lite", + "pin-utils", + "smallvec", + "tokio", + "want", ] [[package]] -name = "jobserver" -version = "0.1.32" +name = "hyper-named-pipe" +version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "48d1dbcbbeb6a7fec7e059840aa538bd62aaccf972c7346c4d9d2059312853d0" +checksum = "73b7d8abf35697b81a825e386fc151e0d503e8cb5fcb93cc8669c376dfd6f278" dependencies = [ - "libc", + "hex", + "hyper", + "hyper-util", + "pin-project-lite", + "tokio", + "tower-service", + "winapi", ] [[package]] -name = "js-sys" +name = "hyper-rustls" +version = "0.27.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e3c93eb611681b207e1fe55d5a71ecf91572ec8a6705cdb6857f7d8d5242cf58" +dependencies = [ + "http", + "hyper", + "hyper-util", + "rustls", + "rustls-pki-types", + "tokio", + "tokio-rustls", + "tower-service", +] + +[[package]] +name = "hyper-timeout" +version = "0.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2b90d566bffbce6a75bd8b09a05aa8c2cb1fabb6cb348f8840c9e4c90a0d83b0" +dependencies = [ + "hyper", + "hyper-util", + "pin-project-lite", + "tokio", + "tower-service", +] + +[[package]] +name = "hyper-util" +version = "0.1.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3c6995591a8f1380fcb4ba966a252a4b29188d51d2b89e3a252f5305be65aea8" +dependencies = [ + "bytes", + "futures-channel", + "futures-core", + "futures-util", + "http", + "http-body", + "hyper", + "libc", + "pin-project-lite", + "socket2 0.6.0", + "tokio", + "tower-service", + "tracing", +] + +[[package]] +name = "hyperlocal" +version = "0.9.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"986c5ce3b994526b3cd75578e62554abd09f0899d6206de48b3e96ab34ccc8c7" +dependencies = [ + "hex", + "http-body-util", + "hyper", + "hyper-util", + "pin-project-lite", + "tokio", + "tower-service", +] + +[[package]] +name = "iana-time-zone" +version = "0.1.61" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "235e081f3925a06703c2d0117ea8b91f042756fd6e7a6e5d901e8ca1a996b220" +dependencies = [ + "android_system_properties", + "core-foundation-sys", + "iana-time-zone-haiku", + "js-sys", + "wasm-bindgen", + "windows-core", +] + +[[package]] +name = "iana-time-zone-haiku" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f31827a206f56af32e590ba56d5d2d085f558508192593743f16b2306495269f" +dependencies = [ + "cc", +] + +[[package]] +name = "icu_collections" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "200072f5d0e3614556f94a9930d5dc3e0662a652823904c3a75dc3b0af7fee47" +dependencies = [ + "displaydoc", + "potential_utf", + "yoke", + "zerofrom", + "zerovec", +] + +[[package]] +name = "icu_locale_core" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0cde2700ccaed3872079a65fb1a78f6c0a36c91570f28755dda67bc8f7d9f00a" +dependencies = [ + "displaydoc", + "litemap", + "tinystr", + "writeable", + "zerovec", +] + +[[package]] +name = "icu_normalizer" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "436880e8e18df4d7bbc06d58432329d6458cc84531f7ac5f024e93deadb37979" +dependencies = [ + "displaydoc", + "icu_collections", + "icu_normalizer_data", + "icu_properties", + "icu_provider", + "smallvec", + "zerovec", +] + +[[package]] +name = "icu_normalizer_data" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "00210d6893afc98edb752b664b8890f0ef174c8adbb8d0be9710fa66fbbf72d3" + +[[package]] +name = "icu_properties" +version = "2.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "016c619c1eeb94efb86809b015c58f479963de65bdb6253345c1a1276f22e32b" +dependencies = [ + "displaydoc", + "icu_collections", + "icu_locale_core", + "icu_properties_data", + "icu_provider", + "potential_utf", + "zerotrie", + "zerovec", +] + +[[package]] +name = "icu_properties_data" +version = "2.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "298459143998310acd25ffe6810ed544932242d3f07083eee1084d83a71bd632" + +[[package]] +name = "icu_provider" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "03c80da27b5f4187909049ee2d72f276f0d9f99a42c306bd0131ecfe04d8e5af" +dependencies = [ + "displaydoc", + "icu_locale_core", + "stable_deref_trait", + "tinystr", + "writeable", + "yoke", + "zerofrom", + "zerotrie", + "zerovec", +] + +[[package]] +name = "ident_case" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b9e0384b61958566e926dc50660321d12159025e767c18e043daf26b70104c39" + +[[package]] +name = "idna" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3b0875f23caa03898994f6ddc501886a45c7d3d62d04d2d90788d47be1b1e4de" +dependencies = [ + "idna_adapter", + "smallvec", + "utf8_iter", +] + +[[package]] +name = "idna_adapter" +version = "1.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3acae9609540aa318d1bc588455225fb2085b9ed0c4f6bd0d9d5bcd86f1a0344" 
+dependencies = [ + "icu_normalizer", + "icu_properties", +] + +[[package]] +name = "indexmap" +version = "1.9.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bd070e393353796e801d209ad339e89596eb4c8d430d18ede6a1cced8fafbd99" +dependencies = [ + "autocfg", + "hashbrown 0.12.3", + "serde", +] + +[[package]] +name = "indexmap" +version = "2.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "68b900aa2f7301e21c36462b170ee99994de34dff39a4a6a528e80e7376d07e5" +dependencies = [ + "equivalent", + "hashbrown 0.14.5", + "serde", +] + +[[package]] +name = "is_terminal_polyfill" +version = "1.70.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7943c866cc5cd64cbc25b2e01621d07fa8eb2a1a23160ee81ce38704e97b8ecf" + +[[package]] +name = "itertools" +version = "0.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "413ee7dfc52ee1a4949ceeb7dbc8a33f2d6c088194d9f922fb8318faf1f01186" +dependencies = [ + "either", +] + +[[package]] +name = "itoa" +version = "1.0.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "49f1f14873335454500d59611f1cf4a4b0f786f9ac11f4312a78e4cf2566695b" + +[[package]] +name = "jiff" +version = "0.2.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a194df1107f33c79f4f93d02c80798520551949d59dfad22b6157048a88cca93" +dependencies = [ + "jiff-static", + "log", + "portable-atomic", + "portable-atomic-util", + "serde", +] + +[[package]] +name = "jiff-static" +version = "0.2.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6c6e1db7ed32c6c71b759497fae34bf7933636f75a251b9e736555da426f6442" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.101", +] + +[[package]] +name = "jobserver" +version = "0.1.32" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "48d1dbcbbeb6a7fec7e059840aa538bd62aaccf972c7346c4d9d2059312853d0" +dependencies = [ + "libc", +] + +[[package]] +name = "js-sys" version = "0.3.70" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1868808506b929d7b0cfa8f75951347aa71bb21144b7791bae35d9bccfcfe37a" @@ -903,7 +1533,18 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6a793df0d7afeac54f95b471d3af7f0d4fb975699f972341a4b76988d49cdf0c" dependencies = [ "cfg-if", - "windows-targets", + "windows-targets 0.52.6", +] + +[[package]] +name = "libredox" +version = "0.1.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "416f7e718bdb06000964960ffa43b4335ad4012ae8b99060261aa4a8088d5ccb" +dependencies = [ + "bitflags 2.6.0", + "libc", + "redox_syscall 0.5.17", ] [[package]] @@ -924,6 +1565,18 @@ version = "0.4.14" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "78b3ae25bc7c8c38cec158d1f2757ee79e9b3740fbc7ccf0e59e4b08d793fa89" +[[package]] +name = "linux-raw-sys" +version = "0.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "df1d3c3b53da64cf5760482273a98e575c651a67eec7f77df96b5b642de8f039" + +[[package]] +name = "litemap" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "241eaef5fd12c88705a01fc1066c48c4b36e0dd4377dcdc7ec3942cea7a69956" + [[package]] name = "log" version = "0.4.27" @@ -949,12 +1602,24 @@ version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"3e2e65a1a2e43cfcb47a895c4c8b10d1f4a61097f9f254f183aee60cad9c651d" +[[package]] +name = "matchit" +version = "0.8.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "47e1ffaa40ddd1f3ed91f717a33c8c0ee23fff369e3aa8772b9605cc1d22f4c3" + [[package]] name = "memchr" version = "2.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "78ca9ab1a0babb1e7d5695e3530886289c18cf2f87ec19a575a0abdce112e3a3" +[[package]] +name = "mime" +version = "0.3.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6877bb514081ee2a7ff5ef9de3281f14a4dd4bceac4c09388074a6b5df8a139a" + [[package]] name = "minimal-lexical" version = "0.2.1" @@ -970,6 +1635,17 @@ dependencies = [ "adler2", ] +[[package]] +name = "mio" +version = "1.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "78bed444cc8a2160f01cbcf811ef18cac863ad68ae8ca62092e8db51d51c761c" +dependencies = [ + "libc", + "wasi 0.11.1+wasi-snapshot-preview1", + "windows-sys 0.59.0", +] + [[package]] name = "nom" version = "7.1.3" @@ -980,6 +1656,76 @@ dependencies = [ "minimal-lexical", ] +[[package]] +name = "num" +version = "0.4.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "35bd024e8b2ff75562e5f34e7f4905839deb4b22955ef5e73d2fea1b9813cb23" +dependencies = [ + "num-bigint", + "num-complex", + "num-integer", + "num-iter", + "num-rational", + "num-traits", +] + +[[package]] +name = "num-bigint" +version = "0.4.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a5e44f723f1133c9deac646763579fdb3ac745e418f2a7af9cd0c431da1f20b9" +dependencies = [ + "num-integer", + "num-traits", +] + +[[package]] +name = "num-complex" +version = "0.4.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "73f88a1307638156682bada9d7604135552957b7818057dcef22705b4d509495" +dependencies = [ + "num-traits", +] + +[[package]] +name = "num-conv" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "51d515d32fb182ee37cda2ccdcb92950d6a3c2893aa280e540671c2cd0f3b1d9" + +[[package]] +name = "num-integer" +version = "0.1.46" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7969661fd2958a5cb096e56c8e1ad0444ac2bbcd0061bd28660485a44879858f" +dependencies = [ + "num-traits", +] + +[[package]] +name = "num-iter" +version = "0.1.45" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1429034a0490724d0075ebb2bc9e875d6503c3cf69e235a8941aa757d83ef5bf" +dependencies = [ + "autocfg", + "num-integer", + "num-traits", +] + +[[package]] +name = "num-rational" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f83d14da390562dca69fc84082e73e548e1ad308d24accdedd2720017cb37824" +dependencies = [ + "num-bigint", + "num-integer", + "num-traits", +] + [[package]] name = "num-traits" version = "0.2.19" @@ -1025,6 +1771,12 @@ version = "1.21.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "42f5e15c9953c5e4ccceeb2e7382a716482c34515315f7b03532b8b4e8393d2d" +[[package]] +name = "openssl-probe" +version = "0.1.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d05e27ee213611ffe7d6348b942e8f942b37114c00cc03cec254295a4a17852e" + [[package]] name = "openssl-src" version = "300.3.2+3.3.2" @@ -1064,10 +1816,61 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"f38d5652c16fde515bb1ecef450ab0f6a219d619a7274976324d5e377f7dceba" [[package]] -name = "pin-project-lite" -version = "0.2.14" +name = "parse-display" +version = "0.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bda66fc9667c18cb2758a2ac84d1167245054bcf85d5d1aaa6923f45801bdd02" +checksum = "914a1c2265c98e2446911282c6ac86d8524f495792c38c5bd884f80499c7538a" +dependencies = [ + "parse-display-derive", + "regex", + "regex-syntax", +] + +[[package]] +name = "parse-display-derive" +version = "0.9.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2ae7800a4c974efd12df917266338e79a7a74415173caf7e70aa0a0707345281" +dependencies = [ + "proc-macro2", + "quote", + "regex", + "regex-syntax", + "structmeta", + "syn 2.0.101", +] + +[[package]] +name = "percent-encoding" +version = "2.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9b4f627cb1b25917193a259e49bdad08f671f8d9708acfd5fe0a8c1455d87220" + +[[package]] +name = "pin-project" +version = "1.1.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "677f1add503faace112b9f1373e43e9e054bfdd22ff1a63c1bc485eaec6a6a8a" +dependencies = [ + "pin-project-internal", +] + +[[package]] +name = "pin-project-internal" +version = "1.1.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6e918e4ff8c4549eb882f14b3a4bc8c8bc93de829416eacf579f1207a8fbf861" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.101", +] + +[[package]] +name = "pin-project-lite" +version = "0.2.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bda66fc9667c18cb2758a2ac84d1167245054bcf85d5d1aaa6923f45801bdd02" [[package]] name = "pin-utils" @@ -1102,7 +1905,7 @@ dependencies = [ "concurrent-queue", "hermit-abi", "pin-project-lite", - "rustix", + "rustix 0.38.37", "tracing", "windows-sys 0.59.0", ] @@ -1122,6 +1925,21 @@ dependencies = [ "portable-atomic", ] +[[package]] +name = "potential_utf" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "84df19adbe5b5a0782edcab45899906947ab039ccf4573713735ee7de1e6b08a" +dependencies = [ + "zerovec", +] + +[[package]] +name = "powerfmt" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "439ee305def115ba05938db6eb1644ff94165c5ab5e9420d1c1bcedbba909391" + [[package]] name = "ppv-lite86" version = "0.2.21" @@ -1150,6 +1968,38 @@ dependencies = [ "unicode-ident", ] +[[package]] +name = "prost" +version = "0.13.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2796faa41db3ec313a31f7624d9286acf277b52de526150b7e69f3debf891ee5" +dependencies = [ + "bytes", + "prost-derive", +] + +[[package]] +name = "prost-derive" +version = "0.13.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8a56d757972c98b346a9b766e3f02746cde6dd1cd1d1d563472929fdd74bec4d" +dependencies = [ + "anyhow", + "itertools", + "proc-macro2", + "quote", + "syn 2.0.101", +] + +[[package]] +name = "prost-types" +version = "0.13.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "52c2c1bf36ddb1a1c396b3601a3cec27c2462e45f07c386894ec3ccf5332bd16" +dependencies = [ + "prost", +] + [[package]] name = "quote" version = "1.0.40" @@ -1191,13 +2041,14 @@ version = "0.9.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "99d9a13982dcf210057a8a78572b2217b667c3beacbf3a0d8b454f6f82837d38" dependencies = [ - 
"getrandom", + "getrandom 0.3.3", ] [[package]] name = "rdkafka" version = "0.38.0" dependencies = [ + "anyhow", "async-std", "backon", "chrono", @@ -1219,6 +2070,7 @@ dependencies = [ "serde_json", "slab", "smol", + "testcontainers-modules", "tokio", "tracing", ] @@ -1239,6 +2091,44 @@ dependencies = [ "zstd-sys", ] +[[package]] +name = "redox_syscall" +version = "0.3.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "567664f262709473930a4bf9e51bf2ebf3348f2e748ccc50dea20646858f8f29" +dependencies = [ + "bitflags 1.3.2", +] + +[[package]] +name = "redox_syscall" +version = "0.5.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5407465600fb0548f1442edf71dd20683c6ed326200ace4b1ef0763521bb3b77" +dependencies = [ + "bitflags 2.6.0", +] + +[[package]] +name = "ref-cast" +version = "1.0.25" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f354300ae66f76f1c85c5f84693f0ce81d747e2c3f21a45fef496d89c960bf7d" +dependencies = [ + "ref-cast-impl", +] + +[[package]] +name = "ref-cast-impl" +version = "1.0.25" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b7186006dcb21920990093f30e3dea63b7d6e977bf1256be20c3563a5db070da" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.101", +] + [[package]] name = "regex" version = "1.11.1" @@ -1268,6 +2158,20 @@ version = "0.8.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2b15c43186be67a4fd63bee50d0303afffcef381492ebe2c5d87f324e1b8815c" +[[package]] +name = "ring" +version = "0.17.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a4689e6c2294d81e88dc6261c768b63bc4fcdb852be6d1352498b114f61383b7" +dependencies = [ + "cc", + "cfg-if", + "getrandom 0.2.16", + "libc", + "untrusted", + "windows-sys 0.52.0", +] + [[package]] name = "rustc-demangle" version = "0.1.24" @@ -1286,13 +2190,82 @@ version = "0.38.37" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8acb788b847c24f28525660c4d7758620a7210875711f79e7f663cc152726811" dependencies = [ - "bitflags", + "bitflags 2.6.0", "errno", "libc", - "linux-raw-sys", + "linux-raw-sys 0.4.14", "windows-sys 0.52.0", ] +[[package]] +name = "rustix" +version = "1.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cd15f8a2c5551a84d56efdc1cd049089e409ac19a3072d5037a17fd70719ff3e" +dependencies = [ + "bitflags 2.6.0", + "errno", + "libc", + "linux-raw-sys 0.11.0", + "windows-sys 0.59.0", +] + +[[package]] +name = "rustls" +version = "0.23.32" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cd3c25631629d034ce7cd9940adc9d45762d46de2b0f57193c4443b92c6d4d40" +dependencies = [ + "log", + "once_cell", + "ring", + "rustls-pki-types", + "rustls-webpki", + "subtle", + "zeroize", +] + +[[package]] +name = "rustls-native-certs" +version = "0.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7fcff2dd52b58a8d98a70243663a0d234c4e2b79235637849d15913394a247d3" +dependencies = [ + "openssl-probe", + "rustls-pki-types", + "schannel", + "security-framework", +] + +[[package]] +name = "rustls-pemfile" +version = "2.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dce314e5fee3f39953d46bb63bb8a46d40c2f8fb7cc5a3b6cab2bde9721d6e50" +dependencies = [ + "rustls-pki-types", +] + +[[package]] +name = "rustls-pki-types" +version = "1.12.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"229a4a4c221013e7e1f1a043678c5cc39fe5171437c88fb47151a21e6f5b5c79" +dependencies = [ + "zeroize", +] + +[[package]] +name = "rustls-webpki" +version = "0.103.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e10b3f4191e8a80e6b43eebabfac91e5dcecebb27a71f04e820c47ec41d314bf" +dependencies = [ + "ring", + "rustls-pki-types", + "untrusted", +] + [[package]] name = "ryu" version = "1.0.18" @@ -1312,20 +2285,86 @@ dependencies = [ "pkg-config", ] +[[package]] +name = "schannel" +version = "0.1.28" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "891d81b926048e76efe18581bf793546b4c0eaf8448d72be8de2bbee5fd166e1" +dependencies = [ + "windows-sys 0.61.1", +] + +[[package]] +name = "schemars" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4cd191f9397d57d581cddd31014772520aa448f65ef991055d7f61582c65165f" +dependencies = [ + "dyn-clone", + "ref-cast", + "serde", + "serde_json", +] + +[[package]] +name = "schemars" +version = "1.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "82d20c4491bc164fa2f6c5d44565947a52ad80b9505d8e36f8d54c27c739fcd0" +dependencies = [ + "dyn-clone", + "ref-cast", + "serde", + "serde_json", +] + +[[package]] +name = "security-framework" +version = "3.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b3297343eaf830f66ede390ea39da1d462b6b0c1b000f420d0a83f898bbbe6ef" +dependencies = [ + "bitflags 2.6.0", + "core-foundation", + "core-foundation-sys", + "libc", + "security-framework-sys", +] + +[[package]] +name = "security-framework-sys" +version = "2.15.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cc1f0cbffaac4852523ce30d8bd3c5cdc873501d96ff467ca09b6767bb8cd5c0" +dependencies = [ + "core-foundation-sys", + "libc", +] + [[package]] name = "serde" -version = "1.0.219" +version = "1.0.228" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5f0e2c6ed6606019b4e29e69dbaba95b11854410e5347d525002456dbbb786b6" +checksum = "9a8e94ea7f378bd32cbbd37198a4a91436180c5bb472411e48b5ec2e2124ae9e" +dependencies = [ + "serde_core", + "serde_derive", +] + +[[package]] +name = "serde_core" +version = "1.0.228" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "41d385c7d4ca58e59fc732af25c3983b67ac852c1a25000afe1175de458b67ad" dependencies = [ "serde_derive", ] [[package]] name = "serde_derive" -version = "1.0.219" +version = "1.0.228" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5b0276cf7f2c73365f7157c8123c21cd9a50fbbd844757af28ca1f5925fc2a00" +checksum = "d540f220d3187173da220f885ab66608367b6574e925011a9353e4badda91d79" dependencies = [ "proc-macro2", "quote", @@ -1344,6 +2383,61 @@ dependencies = [ "serde", ] +[[package]] +name = "serde_repr" +version = "0.1.20" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "175ee3e80ae9982737ca543e96133087cbd9a485eecc3bc4de9c1a37b47ea59c" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.101", +] + +[[package]] +name = "serde_urlencoded" +version = "0.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d3491c14715ca2294c4d6a88f15e84739788c1d030eed8c110436aafdaa2f3fd" +dependencies = [ + "form_urlencoded", + "itoa", + "ryu", + "serde", +] + +[[package]] +name = "serde_with" +version = "3.14.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"c522100790450cf78eeac1507263d0a350d4d5b30df0c8e1fe051a10c22b376e" +dependencies = [ + "base64 0.22.1", + "chrono", + "hex", + "indexmap 1.9.3", + "indexmap 2.5.0", + "schemars 0.9.0", + "schemars 1.0.4", + "serde", + "serde_derive", + "serde_json", + "serde_with_macros", + "time", +] + +[[package]] +name = "serde_with_macros" +version = "3.14.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "327ada00f7d64abaac1e55a6911e90cf665aa051b9a561c7006c157f4633135e" +dependencies = [ + "darling", + "proc-macro2", + "quote", + "syn 2.0.101", +] + [[package]] name = "shared_child" version = "1.0.1" @@ -1378,6 +2472,12 @@ dependencies = [ "autocfg", ] +[[package]] +name = "smallvec" +version = "1.15.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "67b1b7a3b5fe4f1376887184045fcf45c69e92af734b7aaddc05fb777b6fbd03" + [[package]] name = "smol" version = "2.0.2" @@ -1395,32 +2495,204 @@ dependencies = [ "futures-lite", ] +[[package]] +name = "socket2" +version = "0.5.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e22376abed350d73dd1cd119b57ffccad95b4e585a7cda43e286245ce23c0678" +dependencies = [ + "libc", + "windows-sys 0.52.0", +] + +[[package]] +name = "socket2" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "233504af464074f9d066d7b5416c5f9b894a5862a6506e306f7b816cdd6f1807" +dependencies = [ + "libc", + "windows-sys 0.59.0", +] + +[[package]] +name = "stable_deref_trait" +version = "1.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a8f112729512f8e442d81f95a8a7ddf2b7c6b8a1a6f509a95864142b30cab2d3" + [[package]] name = "strsim" version = "0.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7da8b5736845d9f2fcb837ea5d9e2628564b3b043a70948a3f0b778838c5fb4f" +[[package]] +name = "structmeta" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2e1575d8d40908d70f6fd05537266b90ae71b15dbbe7a8b7dffa2b759306d329" +dependencies = [ + "proc-macro2", + "quote", + "structmeta-derive", + "syn 2.0.101", +] + +[[package]] +name = "structmeta-derive" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "152a0b65a590ff6c3da95cabe2353ee04e6167c896b28e3b14478c2636c922fc" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.101", +] + +[[package]] +name = "subtle" +version = "2.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "13c2bddecc57b384dee18652358fb23172facb8a2c51ccc10d74c157bdea3292" + [[package]] name = "syn" version = "1.0.109" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "72b64191b275b66ffe2469e8af2c1cfe3bafa67b529ead792a6d0160888b4237" dependencies = [ - "proc-macro2", - "quote", - "unicode-ident", + "proc-macro2", + "quote", + "unicode-ident", +] + +[[package]] +name = "syn" +version = "2.0.101" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8ce2b7fc941b3a24138a0a7cf8e858bfc6a992e7978a068a5c760deb0ed43caf" +dependencies = [ + "proc-macro2", + "quote", + "unicode-ident", +] + +[[package]] +name = "sync_wrapper" +version = "1.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0bf256ce5efdfa370213c1dabab5935a12e49f2c58d15e9eac2870d3b4f27263" + +[[package]] +name = "synstructure" +version = "0.13.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"728a70f3dbaf5bab7f0c4b1ac8d7ae5ea60a4b5549c8a5914361c99147a709d2" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.101", +] + +[[package]] +name = "testcontainers" +version = "0.25.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b92bce247dc9260a19808321e11b51ea6a0293d02b48ab1c6578960610cfa2a7" +dependencies = [ + "async-trait", + "bollard", + "bollard-stubs", + "bytes", + "docker_credential", + "either", + "etcetera", + "futures", + "log", + "memchr", + "parse-display", + "pin-project-lite", + "serde", + "serde_json", + "serde_with", + "thiserror", + "tokio", + "tokio-stream", + "tokio-tar", + "tokio-util", + "ulid", + "url", +] + +[[package]] +name = "testcontainers-modules" +version = "0.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1966329d5bb3f89d33602d2db2da971fb839f9297dad16527abf4564e2ae0a6d" +dependencies = [ + "testcontainers", +] + +[[package]] +name = "thiserror" +version = "2.0.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f63587ca0f12b72a0600bcba1d40081f830876000bb46dd2337a3051618f4fc8" +dependencies = [ + "thiserror-impl", +] + +[[package]] +name = "thiserror-impl" +version = "2.0.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3ff15c8ecd7de3849db632e14d18d2571fa09dfc5ed93479bc4485c7a517c913" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.101", +] + +[[package]] +name = "time" +version = "0.3.44" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "91e7d9e3bb61134e77bde20dd4825b97c010155709965fedf0f49bb138e52a9d" +dependencies = [ + "deranged", + "itoa", + "num-conv", + "powerfmt", + "serde", + "time-core", + "time-macros", +] + +[[package]] +name = "time-core" +version = "0.1.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "40868e7c1d2f0b8d73e4a8c7f0ff63af4f6d19be117e90bd73eb1d62cf831c6b" + +[[package]] +name = "time-macros" +version = "0.2.24" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "30cfb0125f12d9c277f35663a0a33f8c30190f4e4574868a330595412d34ebf3" +dependencies = [ + "num-conv", + "time-core", ] [[package]] -name = "syn" -version = "2.0.101" +name = "tinystr" +version = "0.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8ce2b7fc941b3a24138a0a7cf8e858bfc6a992e7978a068a5c760deb0ed43caf" +checksum = "5d4f6d1145dcb577acf783d4e601bc1d76a13337bb54e6233add580b07344c8b" dependencies = [ - "proc-macro2", - "quote", - "unicode-ident", + "displaydoc", + "zerovec", ] [[package]] @@ -1430,8 +2702,13 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2513ca694ef9ede0fb23fe71a4ee4107cb102b9dc1930f6d0fd77aae068ae165" dependencies = [ "backtrace", + "bytes", + "libc", + "mio", "pin-project-lite", + "socket2 0.5.10", "tokio-macros", + "windows-sys 0.52.0", ] [[package]] @@ -1445,6 +2722,55 @@ dependencies = [ "syn 2.0.101", ] +[[package]] +name = "tokio-rustls" +version = "0.26.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1729aa945f29d91ba541258c8df89027d5792d85a8841fb65e8bf0f4ede4ef61" +dependencies = [ + "rustls", + "tokio", +] + +[[package]] +name = "tokio-stream" +version = "0.1.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "eca58d7bba4a75707817a2c44174253f9236b2d5fbd055602e9d5c07c139a047" +dependencies = [ + "futures-core", + "pin-project-lite", + "tokio", +] + +[[package]] +name = "tokio-tar" 
+version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9d5714c010ca3e5c27114c1cdeb9d14641ace49874aa5626d7149e47aedace75" +dependencies = [ + "filetime", + "futures-core", + "libc", + "redox_syscall 0.3.5", + "tokio", + "tokio-stream", + "xattr", +] + +[[package]] +name = "tokio-util" +version = "0.7.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "14307c986784f72ef81c89db7d9e28d6ac26d16213b109ea501696195e6e3ce5" +dependencies = [ + "bytes", + "futures-core", + "futures-sink", + "pin-project-lite", + "tokio", +] + [[package]] name = "toml_datetime" version = "0.6.8" @@ -1457,11 +2783,71 @@ version = "0.19.15" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1b5bb770da30e5cbfde35a2d7b9b8a2c4b8ef89548a7a6aeab5c9a576e3e7421" dependencies = [ - "indexmap", + "indexmap 2.5.0", "toml_datetime", "winnow", ] +[[package]] +name = "tonic" +version = "0.13.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7e581ba15a835f4d9ea06c55ab1bd4dce26fc53752c69a04aac00703bfb49ba9" +dependencies = [ + "async-trait", + "axum", + "base64 0.22.1", + "bytes", + "h2", + "http", + "http-body", + "http-body-util", + "hyper", + "hyper-timeout", + "hyper-util", + "percent-encoding", + "pin-project", + "prost", + "socket2 0.5.10", + "tokio", + "tokio-stream", + "tower", + "tower-layer", + "tower-service", + "tracing", +] + +[[package]] +name = "tower" +version = "0.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d039ad9159c98b70ecfd540b2573b97f7f52c3e8d9f8ad57a24b916a536975f9" +dependencies = [ + "futures-core", + "futures-util", + "indexmap 2.5.0", + "pin-project-lite", + "slab", + "sync_wrapper", + "tokio", + "tokio-util", + "tower-layer", + "tower-service", + "tracing", +] + +[[package]] +name = "tower-layer" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "121c2a6cda46980bb0fcd1647ffaf6cd3fc79a013de288782836f6df9c48780e" + +[[package]] +name = "tower-service" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8df9b6e13f2d32c91b9bd719c00d1958837bc7dec474d94952798cc8e69eeec3" + [[package]] name = "tracing" version = "0.1.41" @@ -1493,12 +2879,67 @@ dependencies = [ "once_cell", ] +[[package]] +name = "try-lock" +version = "0.2.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e421abadd41a4225275504ea4d6566923418b7f05506fbc9c0fe86ba7396114b" + +[[package]] +name = "ulid" +version = "1.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "470dbf6591da1b39d43c14523b2b469c86879a53e8b758c8e090a470fe7b1fbe" +dependencies = [ + "rand", + "web-time", +] + [[package]] name = "unicode-ident" version = "1.0.13" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e91b56cd4cadaeb79bbf1a5645f6b4f8dc5bde8834ad5894a8db35fda9efa1fe" +[[package]] +name = "untrusted" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8ecb6da28b8a351d773b68d5825ac39017e680750f980f3a1a85cd8dd28a47c1" + +[[package]] +name = "ureq" +version = "2.12.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "02d1a66277ed75f640d608235660df48c8e3c19f3b4edb6a263315626cc3c01d" +dependencies = [ + "base64 0.22.1", + "log", + "once_cell", + "rustls", + "rustls-pki-types", + "url", + "webpki-roots 0.26.11", +] + +[[package]] +name = "url" +version = 
"2.5.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "08bc136a29a3d1758e07a9cca267be308aeebf5cfd5a10f3f67ab2097683ef5b" +dependencies = [ + "form_urlencoded", + "idna", + "percent-encoding", + "serde", +] + +[[package]] +name = "utf8_iter" +version = "1.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b6c140620e7ffbb22c2dee59cafe6084a59b5ffc27a8859a5f0d494b5d52b6be" + [[package]] name = "utf8parse" version = "0.2.2" @@ -1517,6 +2958,21 @@ version = "0.2.15" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "accd4ea62f7bb7a82fe23066fb0957d48ef677f6eeb8215f372f52e48bb32426" +[[package]] +name = "want" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bfa7760aed19e106de2c7c0b581b509f2f25d3dacaf737cb82ac61bc6d760b0e" +dependencies = [ + "try-lock", +] + +[[package]] +name = "wasi" +version = "0.11.1+wasi-snapshot-preview1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ccf3ec651a847eb01de73ccad15eb7d99f80485de043efb2f370cd654f4ea44b" + [[package]] name = "wasi" version = "0.14.2+wasi-0.2.4" @@ -1603,13 +3059,63 @@ dependencies = [ "wasm-bindgen", ] +[[package]] +name = "web-time" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5a6580f308b1fad9207618087a65c04e7a10bc77e02c8e84e9b00dd4b12fa0bb" +dependencies = [ + "js-sys", + "wasm-bindgen", +] + +[[package]] +name = "webpki-roots" +version = "0.26.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "521bc38abb08001b01866da9f51eb7c5d647a19260e00054a8c7fd5f9e57f7a9" +dependencies = [ + "webpki-roots 1.0.2", +] + +[[package]] +name = "webpki-roots" +version = "1.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7e8983c3ab33d6fb807cfcdad2491c4ea8cbc8ed839181c7dfd9c67c83e261b2" +dependencies = [ + "rustls-pki-types", +] + +[[package]] +name = "winapi" +version = "0.3.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419" +dependencies = [ + "winapi-i686-pc-windows-gnu", + "winapi-x86_64-pc-windows-gnu", +] + +[[package]] +name = "winapi-i686-pc-windows-gnu" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" + +[[package]] +name = "winapi-x86_64-pc-windows-gnu" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" + [[package]] name = "windows-core" version = "0.52.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "33ab640c8d7e35bf8ba19b884ba838ceb4fba93a4e8c65a9059d08afcfc683d9" dependencies = [ - "windows-targets", + "windows-targets 0.52.6", ] [[package]] @@ -1618,13 +3124,19 @@ version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "76840935b766e1b0a05c0066835fb9ec80071d4c09a16f6bd5f7e655e3c14c38" +[[package]] +name = "windows-link" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "45e46c0661abb7180e7b9c281db115305d49ca1709ab8242adf09666d2173c65" + [[package]] name = "windows-sys" version = "0.52.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "282be5f36a8ce781fad8c8ae18fa3f9beff57ec1b52cb3de0789201425d9a33d" 
dependencies = [ - "windows-targets", + "windows-targets 0.52.6", ] [[package]] @@ -1633,7 +3145,25 @@ version = "0.59.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1e38bc4d79ed67fd075bcc251a1c39b32a1776bbe92e5bef1f0bf1f8c531853b" dependencies = [ - "windows-targets", + "windows-targets 0.52.6", +] + +[[package]] +name = "windows-sys" +version = "0.60.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f2f500e4d28234f72040990ec9d39e3a6b950f9f22d3dba18416c35882612bcb" +dependencies = [ + "windows-targets 0.53.4", +] + +[[package]] +name = "windows-sys" +version = "0.61.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6f109e41dd4a3c848907eb83d5a42ea98b3769495597450cf6d153507b166f0f" +dependencies = [ + "windows-link 0.2.0", ] [[package]] @@ -1642,14 +3172,31 @@ version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9b724f72796e036ab90c1021d4780d4d3d648aca59e491e6b98e725b84e99973" dependencies = [ - "windows_aarch64_gnullvm", - "windows_aarch64_msvc", - "windows_i686_gnu", - "windows_i686_gnullvm", - "windows_i686_msvc", - "windows_x86_64_gnu", - "windows_x86_64_gnullvm", - "windows_x86_64_msvc", + "windows_aarch64_gnullvm 0.52.6", + "windows_aarch64_msvc 0.52.6", + "windows_i686_gnu 0.52.6", + "windows_i686_gnullvm 0.52.6", + "windows_i686_msvc 0.52.6", + "windows_x86_64_gnu 0.52.6", + "windows_x86_64_gnullvm 0.52.6", + "windows_x86_64_msvc 0.52.6", +] + +[[package]] +name = "windows-targets" +version = "0.53.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2d42b7b7f66d2a06854650af09cfdf8713e427a439c97ad65a6375318033ac4b" +dependencies = [ + "windows-link 0.2.0", + "windows_aarch64_gnullvm 0.53.0", + "windows_aarch64_msvc 0.53.0", + "windows_i686_gnu 0.53.0", + "windows_i686_gnullvm 0.53.0", + "windows_i686_msvc 0.53.0", + "windows_x86_64_gnu 0.53.0", + "windows_x86_64_gnullvm 0.53.0", + "windows_x86_64_msvc 0.53.0", ] [[package]] @@ -1658,48 +3205,96 @@ version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "32a4622180e7a0ec044bb555404c800bc9fd9ec262ec147edd5989ccd0c02cd3" +[[package]] +name = "windows_aarch64_gnullvm" +version = "0.53.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "86b8d5f90ddd19cb4a147a5fa63ca848db3df085e25fee3cc10b39b6eebae764" + [[package]] name = "windows_aarch64_msvc" version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "09ec2a7bb152e2252b53fa7803150007879548bc709c039df7627cabbd05d469" +[[package]] +name = "windows_aarch64_msvc" +version = "0.53.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c7651a1f62a11b8cbd5e0d42526e55f2c99886c77e007179efff86c2b137e66c" + [[package]] name = "windows_i686_gnu" version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8e9b5ad5ab802e97eb8e295ac6720e509ee4c243f69d781394014ebfe8bbfa0b" +[[package]] +name = "windows_i686_gnu" +version = "0.53.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c1dc67659d35f387f5f6c479dc4e28f1d4bb90ddd1a5d3da2e5d97b42d6272c3" + [[package]] name = "windows_i686_gnullvm" version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0eee52d38c090b3caa76c563b86c3a4bd71ef1a819287c19d586d7334ae8ed66" +[[package]] +name = "windows_i686_gnullvm" +version = "0.53.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "9ce6ccbdedbf6d6354471319e781c0dfef054c81fbc7cf83f338a4296c0cae11" + [[package]] name = "windows_i686_msvc" version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "240948bc05c5e7c6dabba28bf89d89ffce3e303022809e73deaefe4f6ec56c66" +[[package]] +name = "windows_i686_msvc" +version = "0.53.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "581fee95406bb13382d2f65cd4a908ca7b1e4c2f1917f143ba16efe98a589b5d" + [[package]] name = "windows_x86_64_gnu" version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "147a5c80aabfbf0c7d901cb5895d1de30ef2907eb21fbbab29ca94c5b08b1a78" +[[package]] +name = "windows_x86_64_gnu" +version = "0.53.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2e55b5ac9ea33f2fc1716d1742db15574fd6fc8dadc51caab1c16a3d3b4190ba" + [[package]] name = "windows_x86_64_gnullvm" version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "24d5b23dc417412679681396f2b49f3de8c1473deb516bd34410872eff51ed0d" +[[package]] +name = "windows_x86_64_gnullvm" +version = "0.53.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0a6e035dd0599267ce1ee132e51c27dd29437f63325753051e71dd9e42406c57" + [[package]] name = "windows_x86_64_msvc" version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "589f6da84c646204747d1270a2a5661ea66ed1cced2631d546fdfb155959f9ec" +[[package]] +name = "windows_x86_64_msvc" +version = "0.53.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "271414315aff87387382ec3d271b52d7ae78726f5d44ac98b4f4030c91880486" + [[package]] name = "winnow" version = "0.5.40" @@ -1715,7 +3310,47 @@ version = "0.39.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6f42320e61fe2cfd34354ecb597f86f413484a798ba44a8ca1165c58d42da6c1" dependencies = [ - "bitflags", + "bitflags 2.6.0", +] + +[[package]] +name = "writeable" +version = "0.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ea2f10b9bb0928dfb1b42b65e1f9e36f7f54dbdf08457afefb38afcdec4fa2bb" + +[[package]] +name = "xattr" +version = "1.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "32e45ad4206f6d2479085147f02bc2ef834ac85886624a23575ae137c8aa8156" +dependencies = [ + "libc", + "rustix 1.1.2", +] + +[[package]] +name = "yoke" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5f41bb01b8226ef4bfd589436a297c53d118f65921786300e427be8d487695cc" +dependencies = [ + "serde", + "stable_deref_trait", + "yoke-derive", + "zerofrom", +] + +[[package]] +name = "yoke-derive" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "38da3c9736e16c5d3c8c597a9aaa5d1fa565d0532ae05e27c24aa62fb32c0ab6" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.101", + "synstructure", ] [[package]] @@ -1738,6 +3373,66 @@ dependencies = [ "syn 2.0.101", ] +[[package]] +name = "zerofrom" +version = "0.1.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "50cc42e0333e05660c3587f3bf9d0478688e15d870fab3346451ce7f8c9fbea5" +dependencies = [ + "zerofrom-derive", +] + +[[package]] +name = "zerofrom-derive" +version = "0.1.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"d71e5d6e06ab090c67b5e44993ec16b72dcbaabc526db883a360057678b48502" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.101", + "synstructure", +] + +[[package]] +name = "zeroize" +version = "1.8.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b97154e67e32c85465826e8bcc1c59429aaaf107c1e4a9e53c8d8ccd5eff88d0" + +[[package]] +name = "zerotrie" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "36f0bbd478583f79edad978b407914f61b2972f5af6fa089686016be8f9af595" +dependencies = [ + "displaydoc", + "yoke", + "zerofrom", +] + +[[package]] +name = "zerovec" +version = "0.11.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e7aa2bd55086f1ab526693ecbe444205da57e25f4489879da80635a46d90e73b" +dependencies = [ + "yoke", + "zerofrom", + "zerovec-derive", +] + +[[package]] +name = "zerovec-derive" +version = "0.11.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5b96237efa0c878c64bd89c436f661be4e46b2f3eff1ebb976f7ef2321d2f58f" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.101", +] + [[package]] name = "zstd-sys" version = "2.0.15+zstd.1.5.7" diff --git a/Cargo.toml b/Cargo.toml index e556fd18c..2eadee201 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -42,6 +42,8 @@ rand = "0.9.1" regex = "1.11.1" smol = "2.0.2" tokio = { version = "1.18", features = ["macros", "rt-multi-thread", "time"] } +testcontainers-modules = { version = "0.13.0", features = ["kafka"] } +anyhow = { version = "1.0.100" } # These features are re-exports of the features that the rdkafka-sys crate # provides. See the rdkafka-sys documentation for details. diff --git a/src/consumer/base_consumer.rs b/src/consumer/base_consumer.rs index a25e36611..f69ccffc0 100644 --- a/src/consumer/base_consumer.rs +++ b/src/consumer/base_consumer.rs @@ -1,6 +1,7 @@ //! Low-level consumers. use std::ffi::{CStr, CString}; +use std::fmt; use std::mem::ManuallyDrop; use std::os::raw::c_void; use std::ptr; @@ -41,6 +42,20 @@ where nonempty_callback: Option>>, } +impl fmt::Debug for BaseConsumer +where + C: ConsumerContext, +{ + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("BaseConsumer") + .field("native_ptr", &self.client.native_ptr()) + .field("queue", &self.queue) + .field("group_id", &self.group_id) + .field("has_nonempty_callback", &self.nonempty_callback.is_some()) + .finish() + } +} + impl FromClientConfig for BaseConsumer { fn from_config(config: &ClientConfig) -> KafkaResult { BaseConsumer::from_config_and_context(config, DefaultConsumerContext) diff --git a/src/producer/base_producer.rs b/src/producer/base_producer.rs index 0841bafba..7f6d82478 100644 --- a/src/producer/base_producer.rs +++ b/src/producer/base_producer.rs @@ -42,6 +42,7 @@ //! should wait and try again. 
use std::ffi::{CStr, CString}; +use std::fmt; use std::marker::PhantomData; use std::mem; use std::os::raw::c_void; @@ -340,6 +341,19 @@ where _partitioner: PhantomData, } +impl fmt::Debug for BaseProducer +where + Part: Partitioner, + C: ProducerContext, +{ + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("BaseProducer") + .field("native_ptr", &self.native_ptr()) + .field("queue", &self.queue) + .finish() + } +} + impl BaseProducer where Part: Partitioner, diff --git a/tests/admin.rs b/tests/admin.rs new file mode 100644 index 000000000..fde2cbf58 --- /dev/null +++ b/tests/admin.rs @@ -0,0 +1,672 @@ +use crate::utils::admin::create_topic; +use crate::utils::containers::KafkaContext; +use crate::utils::logging::init_test_logger; +use crate::utils::rand::{rand_test_group, rand_test_topic}; +use crate::utils::{get_broker_version, KafkaVersion}; +use backon::{BlockingRetryable, ExponentialBuilder}; +use rdkafka::admin::{ + AdminClient, AdminOptions, AlterConfig, ConfigEntry, ConfigSource, GroupResult, NewPartitions, + NewTopic, OwnedResourceSpecifier, ResourceSpecifier, TopicReplication, +}; +use rdkafka::client::DefaultClientContext; +use rdkafka::error::KafkaError; +use rdkafka::producer::{FutureRecord, Producer}; +use rdkafka::{ClientConfig, Offset, TopicPartitionList}; +use rdkafka_sys::RDKafkaErrorCode; +use std::time::Duration; + +#[path = "utils/mod.rs"] +mod utils; + +/// Validates thast topics can be properly created. +#[tokio::test] +pub async fn test_topic_creation() { + init_test_logger(); + + // Get Kafka container context. + let kafka_context_result = KafkaContext::shared().await; + let Ok(kafka_context) = kafka_context_result else { + panic!( + "could not create kafka context: {}", + kafka_context_result.unwrap_err() + ); + }; + let test_topic_name = rand_test_topic("testing-topic"); + + let admin_client_result = + utils::admin::create_admin_client(&kafka_context.bootstrap_servers).await; + let Ok(admin_client) = admin_client_result else { + panic!( + "could not create admin client: {}", + admin_client_result.unwrap_err() + ); + }; + + let create_topic_result = create_topic(&admin_client, &test_topic_name).await; + if create_topic_result.is_err() { + panic!( + "could not create topic: {}", + create_topic_result.unwrap_err() + ); + }; +} + +/// Verify that topics are created as specified, and that they can later +/// be deleted. +#[tokio::test] +pub async fn test_topic_create_and_delete() { + init_test_logger(); + + // Get Kafka container context. 
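+    // (Assumption: KafkaContext::shared() is a helper in the new tests/utils module; its name
+    // and usage suggest it starts a single testcontainers Kafka broker on first use and shares
+    // it across tests, so each test only pays for its own topic setup.)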
+    let kafka_context = KafkaContext::shared()
+        .await
+        .expect("could not create kafka context");
+
+    // Create admin client
+    let admin_client = utils::admin::create_admin_client(&kafka_context.bootstrap_servers)
+        .await
+        .expect("could not create admin client");
+    let opts = AdminOptions::new().operation_timeout(Some(Duration::from_secs(30)));
+
+    // Create consumer client
+    let consumer_client =
+        utils::consumer::create_unsubscribed_base_consumer(&kafka_context.bootstrap_servers, None)
+            .await
+            .expect("could not create consumer client");
+
+    let topic_name_1 = rand_test_topic("test_topics");
+    let topic_name_2 = rand_test_topic("test_topics");
+    let topic1 = NewTopic::new(&topic_name_1, 1, TopicReplication::Fixed(1))
+        .set("max.message.bytes", "1234");
+    let topic2 = NewTopic {
+        name: &topic_name_2,
+        num_partitions: 3,
+        replication: TopicReplication::Variable(&[
+            &[utils::BROKER_ID],
+            &[utils::BROKER_ID],
+            &[utils::BROKER_ID],
+        ]),
+        config: Vec::new(),
+    };
+
+    // Create the topics
+    let topic_results = admin_client
+        .create_topics(&[topic1, topic2], &opts)
+        .await
+        .expect("topic creation failed");
+    assert_eq!(
+        topic_results,
+        &[Ok(topic_name_1.clone()), Ok(topic_name_2.clone())]
+    );
+
+    // Verify metadata
+    let metadata1 = utils::consumer::fetch_consumer_metadata(&consumer_client, &topic_name_1)
+        .expect(&format!("failed to fetch metadata for {}", &topic_name_1));
+    let metadata2 = utils::consumer::fetch_consumer_metadata(&consumer_client, &topic_name_2)
+        .expect(&format!("failed to fetch metadata for {}", topic_name_2));
+    assert_eq!(1, metadata1.topics().len());
+    assert_eq!(1, metadata2.topics().len());
+    let metadata_topic1 = &metadata1.topics()[0];
+    let metadata_topic2 = &metadata2.topics()[0];
+    assert_eq!(&topic_name_1, metadata_topic1.name());
+    assert_eq!(&topic_name_2, metadata_topic2.name());
+    assert_eq!(1, metadata_topic1.partitions().len());
+    assert_eq!(3, metadata_topic2.partitions().len());
+
+    // Verify topic configurations
+    let config_resource_results = admin_client
+        .describe_configs(
+            &[
+                ResourceSpecifier::Topic(&topic_name_1),
+                ResourceSpecifier::Topic(&topic_name_2),
+            ],
+            &opts,
+        )
+        .await
+        .expect("could not describe configs");
+    let topic_config1 = &config_resource_results[0]
+        .as_ref()
+        .expect(&format!("failed to describe config for {}", &topic_name_1));
+    let topic_config2 = &config_resource_results[1]
+        .as_ref()
+        .expect(&format!("failed to describe config for {}", &topic_name_2));
+    let mut expected_entry1 = ConfigEntry {
+        name: "max.message.bytes".into(),
+        value: Some("1234".into()),
+        source: ConfigSource::DynamicTopic,
+        is_read_only: false,
+        is_default: false,
+        is_sensitive: false,
+    };
+    let default_max_msg_bytes = if get_broker_version(&kafka_context) <= KafkaVersion(2, 3, 0, 0) {
+        "1000012"
+    } else {
+        "1048588"
+    };
+    let expected_entry2 = ConfigEntry {
+        name: "max.message.bytes".into(),
+        value: Some(default_max_msg_bytes.into()),
+        source: ConfigSource::Default,
+        is_read_only: false,
+        is_default: true,
+        is_sensitive: false,
+    };
+    if get_broker_version(&kafka_context) < KafkaVersion(1, 1, 0, 0) {
+        expected_entry1.source = ConfigSource::Unknown;
+    }
+    assert_eq!(
+        Some(&expected_entry1),
+        topic_config1.get("max.message.bytes")
+    );
+    assert_eq!(
+        Some(&expected_entry2),
+        topic_config2.get("max.message.bytes")
+    );
+    let config_entries1 = topic_config1.entry_map();
+    let config_entries2 = topic_config2.entry_map();
+    assert_eq!(topic_config1.entries.len(), config_entries1.len());
+    assert_eq!(topic_config2.entries.len(), config_entries2.len());
+    assert_eq!(
+        Some(&&expected_entry1),
+        config_entries1.get("max.message.bytes")
+    );
+    assert_eq!(
+        Some(&&expected_entry2),
+        config_entries2.get("max.message.bytes")
+    );
+
+    let partitions1 = NewPartitions::new(&topic_name_1, 5);
+    let res = admin_client
+        .create_partitions(&[partitions1], &opts)
+        .await
+        .expect("partition creation failed");
+    assert_eq!(res, &[Ok(topic_name_1.clone())]);
+
+    let mut tries = 0;
+    loop {
+        let metadata = utils::consumer::fetch_consumer_metadata(&consumer_client, &topic_name_1)
+            .expect(&format!("failed to fetch metadata for {}", &topic_name_1));
+        let topic = &metadata.topics()[0];
+        let n = topic.partitions().len();
+        if n == 5 {
+            break;
+        } else if tries >= 5 {
+            panic!("topic has {} partitions, but expected {}", n, 5);
+        } else {
+            tries += 1;
+            tokio::time::sleep(Duration::from_secs(1)).await;
+        }
+    }
+
+    let res = admin_client
+        .delete_topics(&[&topic_name_1, &topic_name_2], &opts)
+        .await
+        .expect("topic deletion failed");
+    assert_eq!(res, &[Ok(topic_name_1.clone()), Ok(topic_name_2.clone())]);
+    utils::consumer::verify_topic_deleted(&consumer_client, &topic_name_1)
+        .expect(&format!("could not verify topic \"{}\" was deleted", &topic_name_1));
+    utils::consumer::verify_topic_deleted(&consumer_client, &topic_name_2)
+        .expect(&format!("could not verify topic \"{}\" was deleted", &topic_name_2));
+}
+
+/// Verify that incorrect replication configurations are rejected when
+/// creating topics.
+#[tokio::test]
+pub async fn test_incorrect_replication_factors_are_ignored_when_creating_topics() {
+    init_test_logger();
+
+    // Get Kafka container context.
+    let kafka_context = KafkaContext::shared()
+        .await
+        .unwrap_or_else(|e| panic!("could not create kafka context: {}", e));
+
+    let admin_client = utils::admin::create_admin_client(&kafka_context.bootstrap_servers)
+        .await
+        .unwrap_or_else(|e| panic!("could not create admin client: {}", e));
+    let opts = AdminOptions::new().operation_timeout(Some(Duration::from_secs(30)));
+
+    let topic = NewTopic::new(
+        "ignored",
+        1,
+        TopicReplication::Variable(&[&[utils::BROKER_ID], &[utils::BROKER_ID]]),
+    );
+    let res = admin_client.create_topics(&[topic], &opts).await;
+    assert_eq!(
+        Err(KafkaError::AdminOpCreation(
+            "replication configuration for topic 'ignored' assigns 2 partition(s), \
+             which does not match the specified number of partitions (1)"
+                .into()
+        )),
+        res,
+    )
+}
+
+/// Verify that incorrect replication configurations are rejected when
+/// creating partitions.
+#[tokio::test]
+pub async fn test_incorrect_replication_factors_are_ignored_when_creating_partitions() {
+    init_test_logger();
+
+    // Get Kafka container context.
+    let kafka_context = KafkaContext::shared()
+        .await
+        .unwrap_or_else(|e| panic!("could not create kafka context: {}", e));
+
+    let admin_client = utils::admin::create_admin_client(&kafka_context.bootstrap_servers)
+        .await
+        .unwrap_or_else(|e| panic!("could not create admin client: {}", e));
+    let opts = AdminOptions::new().operation_timeout(Some(Duration::from_secs(30)));
+
+    // Create consumer client
+    let consumer_client =
+        utils::consumer::create_unsubscribed_base_consumer(&kafka_context.bootstrap_servers, None)
+            .await
+            .expect("could not create consumer client");
+
+    let name = rand_test_topic("test_topics");
+    let topic = NewTopic::new(&name, 1, TopicReplication::Fixed(1));
+
+    let res = admin_client
+        .create_topics(vec![&topic], &opts)
+        .await
+        .expect("topic creation failed");
+    assert_eq!(res, &[Ok(name.clone())]);
+    let _ = utils::consumer::fetch_consumer_metadata(&consumer_client, &name);
+
+    // This partition specification is obviously garbage, and so trips
+    // a client-side error.
+    let partitions = NewPartitions::new(&name, 2).assign(&[&[0], &[0], &[0]]);
+    let res = admin_client.create_partitions(&[partitions], &opts).await;
+    assert_eq!(
+        res,
+        Err(KafkaError::AdminOpCreation(format!(
+            "partition assignment for topic '{}' assigns 3 partition(s), \
+             which is more than the requested total number of partitions (2)",
+            name
+        )))
+    );
+
+    // Only the server knows that this partition specification is garbage.
+    let partitions = NewPartitions::new(&name, 2).assign(&[&[0], &[0]]);
+    let res = admin_client
+        .create_partitions(&[partitions], &opts)
+        .await
+        .expect("partition creation failed");
+    assert_eq!(
+        res,
+        &[Err((name, RDKafkaErrorCode::InvalidReplicaAssignment))],
+    );
+}
+
+/// Verify that deleting a non-existent topic fails.
+#[tokio::test]
+pub async fn test_delete_nonexistent_topics() {
+    init_test_logger();
+
+    // Get Kafka container context.
+    let kafka_context = KafkaContext::shared()
+        .await
+        .expect("could not create kafka context");
+
+    // Create admin client
+    let admin_client = utils::admin::create_admin_client(&kafka_context.bootstrap_servers)
+        .await
+        .expect("could not create admin client");
+    let opts = AdminOptions::new().operation_timeout(Some(Duration::from_secs(30)));
+
+    let name = rand_test_topic("test_topics");
+    let res = admin_client
+        .delete_topics(&[&name], &opts)
+        .await
+        .expect("delete topics failed");
+    assert_eq!(
+        res,
+        &[Err((name, RDKafkaErrorCode::UnknownTopicOrPartition))]
+    );
+}
+
+/// Verify that mixed-success operations properly report the successful and
+/// failing operations.
+#[tokio::test]
+pub async fn test_mixed_success_results() {
+    init_test_logger();
+
+    // Get Kafka container context.
+    let kafka_context = KafkaContext::shared()
+        .await
+        .expect("could not create kafka context");
+
+    // Create admin client
+    let admin_client = utils::admin::create_admin_client(&kafka_context.bootstrap_servers)
+        .await
+        .expect("could not create admin client");
+    let opts = AdminOptions::new().operation_timeout(Some(Duration::from_secs(30)));
+
+    // Create consumer client
+    let consumer_client =
+        utils::consumer::create_unsubscribed_base_consumer(&kafka_context.bootstrap_servers, None)
+            .await
+            .expect("could not create consumer client");
+
+    let name1 = rand_test_topic("test_topics");
+    let name2 = rand_test_topic("test_topics");
+
+    let topic1 = NewTopic::new(&name1, 1, TopicReplication::Fixed(1));
+    let topic2 = NewTopic::new(&name2, 1, TopicReplication::Fixed(1));
+
+    let res = admin_client
+        .create_topics(vec![&topic1], &opts)
+        .await
+        .expect("topic creation failed");
+    assert_eq!(res, &[Ok(name1.clone())]);
+    let _ = utils::consumer::fetch_consumer_metadata(&consumer_client, &name1)
+        .expect(&format!("could not fetch consumer metadata for {}", name1));
+
+    let res = admin_client
+        .create_topics(vec![&topic1, &topic2], &opts)
+        .await
+        .expect("topic creation failed");
+    assert_eq!(
+        res,
+        &[
+            Err((name1.clone(), RDKafkaErrorCode::TopicAlreadyExists)),
+            Ok(name2.clone())
+        ]
+    );
+    let _ = utils::consumer::fetch_consumer_metadata(&consumer_client, &name2)
+        .expect(&format!("could not fetch consumer metadata for {}", name2));
+
+    let res = admin_client
+        .delete_topics(&[&name1], &opts)
+        .await
+        .expect("topic deletion failed");
+    assert_eq!(res, &[Ok(name1.clone())]);
+    utils::consumer::verify_topic_deleted(&consumer_client, &name1)
+        .expect(&format!("could not verify topic \"{}\" was deleted", name1));
+
+    let res = admin_client
+        .delete_topics(&[&name2, &name1], &opts)
+        .await
+        .expect("topic deletion failed");
+    assert_eq!(
+        res,
+        &[
+            Ok(name2.clone()),
+            Err((name1.clone(), RDKafkaErrorCode::UnknownTopicOrPartition))
+        ]
+    );
+}
+
+/// Test the admin client's delete records functionality.
+#[tokio::test]
+async fn test_delete_records() {
+    init_test_logger();
+
+    // Get Kafka container context.
+    let kafka_context = KafkaContext::shared()
+        .await
+        .expect("could not create kafka context");
+
+    // Create admin client
+    let admin_client = utils::admin::create_admin_client(&kafka_context.bootstrap_servers)
+        .await
+        .expect("could not create admin client");
+
+    // Create producer client
+    let producer_client =
+        utils::producer::future_producer::create_producer(&kafka_context.bootstrap_servers)
+            .await
+            .expect("could not create producer client");
+
+    let timeout = Some(Duration::from_secs(1));
+    let opts = AdminOptions::new().operation_timeout(timeout);
+    let topic = rand_test_topic("test_delete_records");
+    let make_record = || FutureRecord::<str, str>::to(&topic).payload("data");
+
+    // Create a topic with a single partition.
+    admin_client
+        .create_topics(
+            &[NewTopic::new(&topic, 1, TopicReplication::Fixed(1))],
+            &opts,
+        )
+        .await
+        .expect("topic creation failed");
+
+    // Ensure that the topic begins with low and high water marks of 0.
+    let (lo, hi) = (|| {
+        producer_client
+            .client()
+            .fetch_watermarks(&topic, 0, timeout)
+    })
+    .retry(ExponentialBuilder::default().with_max_delay(Duration::from_secs(5)))
+    .call()
+    .unwrap();
+    assert_eq!(lo, 0);
+    assert_eq!(hi, 0);
+
+    // Produce five messages to the topic.
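+    // (The low watermark is the first available offset in the partition and the high watermark
+    // is one past the last written record; producing five acknowledged records should therefore
+    // move the high watermark to 5, and delete_records below advances the low watermark.)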
+ for _ in 0..5 { + producer_client.send(make_record(), timeout).await.unwrap(); + } + + // Ensure that the high water mark has advanced to 5. + let (lo, hi) = producer_client + .client() + .fetch_watermarks(&topic, 0, timeout) + .unwrap(); + assert_eq!(lo, 0); + assert_eq!(hi, 5); + + // Delete the record at offset 0. + let mut tpl = TopicPartitionList::new(); + tpl.add_partition_offset(&topic, 0, Offset::Offset(1)) + .unwrap(); + let res_tpl = admin_client.delete_records(&tpl, &opts).await.unwrap(); + assert_eq!(res_tpl.count(), 1); + assert_eq!(res_tpl.elements()[0].topic(), topic); + assert_eq!(res_tpl.elements()[0].partition(), 0); + assert_eq!(res_tpl.elements()[0].offset(), Offset::Offset(1)); + assert_eq!(res_tpl.elements()[0].error(), Ok(())); + + // Ensure that the low water mark has advanced to 1. + let (lo, hi) = producer_client + .client() + .fetch_watermarks(&topic, 0, timeout) + .unwrap(); + assert_eq!(lo, 1); + assert_eq!(hi, 5); + + // Delete the record at offset 1 and also include an invalid partition in + // the request. The invalid partition should not cause the request to fail, + // but we should be able to see the per-partition error in the returned + // topic partition list. + let mut tpl = TopicPartitionList::new(); + tpl.add_partition_offset(&topic, 0, Offset::Offset(2)) + .unwrap(); + tpl.add_partition_offset(&topic, 1, Offset::Offset(1)) + .unwrap(); + let res_tpl = admin_client.delete_records(&tpl, &opts).await.unwrap(); + assert_eq!(res_tpl.count(), 2); + assert_eq!(res_tpl.elements()[0].topic(), topic); + assert_eq!(res_tpl.elements()[0].partition(), 0); + assert_eq!(res_tpl.elements()[0].offset(), Offset::Offset(2)); + assert_eq!(res_tpl.elements()[0].error(), Ok(())); + assert_eq!(res_tpl.elements()[1].topic(), topic); + assert_eq!(res_tpl.elements()[1].partition(), 1); + assert_eq!( + res_tpl.elements()[1].error(), + Err(KafkaError::OffsetFetch(RDKafkaErrorCode::UnknownPartition)) + ); + + // Ensure that the low water mark has advanced to 2. + let (lo, hi) = producer_client + .client() + .fetch_watermarks(&topic, 0, timeout) + .unwrap(); + assert_eq!(lo, 2); + assert_eq!(hi, 5); + + // Delete all records up to offset 5. + let mut tpl = TopicPartitionList::new(); + tpl.add_partition_offset(&topic, 0, Offset::End).unwrap(); + let res_tpl = admin_client.delete_records(&tpl, &opts).await.unwrap(); + assert_eq!(res_tpl.count(), 1); + assert_eq!(res_tpl.elements()[0].topic(), topic); + assert_eq!(res_tpl.elements()[0].partition(), 0); + assert_eq!(res_tpl.elements()[0].offset(), Offset::Offset(5)); + assert_eq!(res_tpl.elements()[0].error(), Ok(())); + + // Ensure that the low water mark has advanced to 5. + let (lo, hi) = producer_client + .client() + .fetch_watermarks(&topic, 0, timeout) + .unwrap(); + assert_eq!(lo, 5); + assert_eq!(hi, 5); +} + +#[tokio::test] +async fn test_configs() { + init_test_logger(); + + // Get Kafka container context. 
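+    // (This test edits broker-level dynamic config. utils::BROKER_ID presumably identifies the
+    // lone broker inside the test container, and ResourceSpecifier::Broker targets it.)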
+    let kafka_context = KafkaContext::shared()
+        .await
+        .expect("could not create kafka context");
+
+    // Create admin client
+    let admin_client = utils::admin::create_admin_client(&kafka_context.bootstrap_servers)
+        .await
+        .expect("could not create admin client");
+    let opts = AdminOptions::new();
+    let broker = ResourceSpecifier::Broker(utils::BROKER_ID);
+
+    let res = admin_client
+        .describe_configs(&[broker], &opts)
+        .await
+        .expect("describe configs failed");
+    let config = &res[0].as_ref().expect("describe configs failed");
+    let orig_val = config
+        .get("log.flush.interval.messages")
+        .expect("original config entry missing")
+        .value
+        .as_ref()
+        .expect("original value missing");
+
+    let config = AlterConfig::new(broker).set("log.flush.interval.messages", "1234");
+    let res = admin_client
+        .alter_configs(&[config], &opts)
+        .await
+        .expect("alter configs failed");
+    assert_eq!(res, &[Ok(OwnedResourceSpecifier::Broker(utils::BROKER_ID))]);
+
+    let mut tries = 0;
+    loop {
+        let res = admin_client
+            .describe_configs(&[broker], &opts)
+            .await
+            .expect("describe configs failed");
+        let config = &res[0].as_ref().expect("describe configs failed");
+        let entry = config.get("log.flush.interval.messages");
+        let expected_entry = if get_broker_version(&kafka_context) < KafkaVersion(1, 1, 0, 0) {
+            // Pre-1.1, the AlterConfig operation will silently fail, and the
+            // config will remain unchanged, which I guess is worth testing.
+            ConfigEntry {
+                name: "log.flush.interval.messages".into(),
+                value: Some(orig_val.clone()),
+                source: ConfigSource::Default,
+                is_read_only: true,
+                is_default: true,
+                is_sensitive: false,
+            }
+        } else {
+            ConfigEntry {
+                name: "log.flush.interval.messages".into(),
+                value: Some("1234".into()),
+                source: ConfigSource::DynamicBroker,
+                is_read_only: false,
+                is_default: false,
+                is_sensitive: false,
+            }
+        };
+        if entry == Some(&expected_entry) {
+            break;
+        } else if tries >= 5 {
+            panic!("{:?} != {:?}", entry, Some(&expected_entry));
+        } else {
+            tries += 1;
+            tokio::time::sleep(Duration::from_secs(1)).await;
+        }
+    }
+
+    let config = AlterConfig::new(broker).set("log.flush.interval.messages", orig_val);
+    let res = admin_client
+        .alter_configs(&[config], &opts)
+        .await
+        .expect("alter configs failed");
+    assert_eq!(res, &[Ok(OwnedResourceSpecifier::Broker(utils::BROKER_ID))]);
+}
+
+/// Tests whether each admin operation properly reports an error if the entire
+/// request fails. The original implementations failed to check this, resulting
+/// in confusing situations where a failed admin request would return Ok([]).
+#[tokio::test]
+async fn test_event_errors() {
+    // Configure an admin client to target a Kafka server that doesn't exist,
+    // then set an impossible timeout. This will ensure that every request fails
+    // with an OperationTimedOut error, assuming, of course, that the request
+    // passes client-side validation.
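+    // (request_timeout bounds the whole client-side request, unlike operation_timeout, which
+    // is how long the broker may spend executing the operation; a 1ns budget can never be met,
+    // so every call that reaches the network layer times out.)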
+    let admin_client = ClientConfig::new()
+        .set("bootstrap.servers", "noexist")
+        .create::<AdminClient<DefaultClientContext>>()
+        .expect("admin client creation failed");
+    let opts = AdminOptions::new().request_timeout(Some(Duration::from_nanos(1)));
+
+    let res = admin_client.create_topics(&[], &opts).await;
+    assert_eq!(
+        res,
+        Err(KafkaError::AdminOp(RDKafkaErrorCode::OperationTimedOut))
+    );
+
+    let res = admin_client.create_partitions(&[], &opts).await;
+    assert_eq!(
+        res,
+        Err(KafkaError::AdminOp(RDKafkaErrorCode::OperationTimedOut))
+    );
+
+    let res = admin_client.delete_topics(&[], &opts).await;
+    assert_eq!(
+        res,
+        Err(KafkaError::AdminOp(RDKafkaErrorCode::OperationTimedOut))
+    );
+
+    let res = admin_client.describe_configs(&[], &opts).await;
+    assert_eq!(
+        res.err(),
+        Some(KafkaError::AdminOp(RDKafkaErrorCode::OperationTimedOut))
+    );
+
+    let res = admin_client.alter_configs(&[], &opts).await;
+    assert_eq!(
+        res,
+        Err(KafkaError::AdminOp(RDKafkaErrorCode::OperationTimedOut))
+    );
+}
diff --git a/tests/test_low_consumers.rs b/tests/base_consumer.rs
similarity index 72%
rename from tests/test_low_consumers.rs
rename to tests/base_consumer.rs
index c97802243..3b0e4f61c 100644
--- a/tests/test_low_consumers.rs
+++ b/tests/base_consumer.rs
@@ -7,38 +7,56 @@ use std::sync::Arc;
 use std::thread;
 use std::time::{Duration, Instant};
 
+use rdkafka::admin::AdminOptions;
 use rdkafka::consumer::{BaseConsumer, Consumer, ConsumerContext};
 use rdkafka::error::{KafkaError, RDKafkaErrorCode};
 use rdkafka::topic_partition_list::{Offset, TopicPartitionList};
 use rdkafka::util::{current_time_millis, Timeout};
 use rdkafka::{ClientConfig, Message, Timestamp};
 
+use crate::utils::admin;
+use crate::utils::containers::KafkaContext;
+use crate::utils::logging::init_test_logger;
+use crate::utils::producer;
+use crate::utils::rand::*;
 use crate::utils::*;
 
 mod utils;
 
-fn create_base_consumer(
-    group_id: &str,
-    config_overrides: Option<HashMap<&str, &str>>,
-) -> BaseConsumer<ConsumerTestContext> {
-    consumer_config(group_id, config_overrides)
-        .create_with_context(ConsumerTestContext { _n: 64 })
-        .expect("Consumer creation failed")
-}
-
 // Seeking should allow replaying messages and skipping messages.
#[tokio::test] async fn test_produce_consume_seek() { - let _r = env_logger::try_init(); + init_test_logger(); + let kafka_context = KafkaContext::shared() + .await + .expect("could not create kafka context"); let topic_name = rand_test_topic("test_produce_consume_seek"); - populate_topic(&topic_name, 5, &value_fn, &key_fn, Some(0), None).await; - let consumer = create_base_consumer(&rand_test_group(), None); + let admin_client = admin::create_admin_client(&kafka_context.bootstrap_servers) + .await + .expect("Could not create admin client"); + admin_client + .create_topics( + &admin::new_topic_vec(&topic_name, Some(1)), + &AdminOptions::default(), + ) + .await + .expect("could not create topic"); + + let producer = producer::future_producer::create_producer(&kafka_context.bootstrap_servers) + .await + .expect("Could not create Future producer"); + produce_messages_to_partition(&producer, &topic_name, 5, 0).await; + + let group_id = rand_test_group(); + let consumer = + utils::consumer::create_base_consumer(&kafka_context.bootstrap_servers, &group_id, None) + .expect("could not create base consumer"); consumer.subscribe(&[topic_name.as_str()]).unwrap(); for (i, message) in consumer.iter().take(3).enumerate() { match message { - Ok(message) => assert_eq!(dbg!(message.offset()), i as i64), + Ok(message) => assert_eq!(message.offset(), i as i64), Err(e) => panic!("Error receiving message: {:?}", e), } } @@ -94,12 +112,31 @@ async fn test_produce_consume_seek() { // Seeking should allow replaying messages and skipping messages. #[tokio::test] async fn test_produce_consume_seek_partitions() { - let _r = env_logger::try_init(); + init_test_logger(); + let kafka_context = KafkaContext::shared() + .await + .expect("could not create kafka context"); let topic_name = rand_test_topic("test_produce_consume_seek_partitions"); - populate_topic(&topic_name, 30, &value_fn, &key_fn, None, None).await; + let admin_client = admin::create_admin_client(&kafka_context.bootstrap_servers) + .await + .expect("Could not create admin client"); + admin_client + .create_topics( + &admin::new_topic_vec(&topic_name, Some(3)), + &AdminOptions::default(), + ) + .await + .expect("could not create topic"); + let producer = producer::future_producer::create_producer(&kafka_context.bootstrap_servers) + .await + .expect("Could not create Future producer"); + produce_messages(&producer, &topic_name, 30, None, None).await; - let consumer = create_base_consumer(&rand_test_group(), None); + let group_id = rand_test_group(); + let consumer = + utils::consumer::create_base_consumer(&kafka_context.bootstrap_servers, &group_id, None) + .expect("could not create base consumer"); consumer.subscribe(&[topic_name.as_str()]).unwrap(); let mut partition_offset_map = HashMap::new(); @@ -155,12 +192,33 @@ async fn test_produce_consume_seek_partitions() { // All produced messages should be consumed. 
#[tokio::test] async fn test_produce_consume_iter() { - let _r = env_logger::try_init(); + init_test_logger(); + let kafka_context = KafkaContext::shared() + .await + .expect("could not create kafka context"); let start_time = current_time_millis(); let topic_name = rand_test_topic("test_produce_consume_iter"); - let message_map = populate_topic(&topic_name, 100, &value_fn, &key_fn, None, None).await; - let consumer = create_base_consumer(&rand_test_group(), None); + let admin_client = admin::create_admin_client(&kafka_context.bootstrap_servers) + .await + .expect("Could not create admin client"); + admin_client + .create_topics( + &admin::new_topic_vec(&topic_name, Some(3)), + &AdminOptions::default(), + ) + .await + .expect("could not create topic"); + + let producer = producer::future_producer::create_producer(&kafka_context.bootstrap_servers) + .await + .expect("Could not create Future producer"); + let message_map = produce_messages(&producer, &topic_name, 100, None, None).await; + + let group_id = rand_test_group(); + let consumer = + utils::consumer::create_base_consumer(&kafka_context.bootstrap_servers, &group_id, None) + .expect("could not create base consumer"); consumer.subscribe(&[topic_name.as_str()]).unwrap(); for message in consumer.iter().take(100) { @@ -194,20 +252,30 @@ async fn test_pause_resume_consumer_iter() { const MESSAGE_COUNT: i32 = 300; const MESSAGES_PER_PAUSE: i32 = MESSAGE_COUNT / PAUSE_COUNT; - let _r = env_logger::try_init(); + init_test_logger(); + let kafka_context = KafkaContext::shared() + .await + .expect("could not create kafka context"); let topic_name = rand_test_topic("test_pause_resume_consumer_iter"); - populate_topic( - &topic_name, - MESSAGE_COUNT, - &value_fn, - &key_fn, - Some(0), - None, - ) - .await; + let admin_client = admin::create_admin_client(&kafka_context.bootstrap_servers) + .await + .expect("Could not create admin client"); + admin_client + .create_topics( + &admin::new_topic_vec(&topic_name, Some(1)), + &AdminOptions::default(), + ) + .await + .expect("could not create topic"); + let producer = producer::future_producer::create_producer(&kafka_context.bootstrap_servers) + .await + .expect("Could not create Future producer"); + produce_messages_to_partition(&producer, &topic_name, MESSAGE_COUNT as usize, 0).await; let group_id = rand_test_group(); - let consumer = create_base_consumer(&group_id, None); + let consumer = + utils::consumer::create_base_consumer(&kafka_context.bootstrap_servers, &group_id, None) + .expect("could not create base consumer"); consumer.subscribe(&[topic_name.as_str()]).unwrap(); for _ in 0..PAUSE_COUNT { @@ -235,17 +303,41 @@ async fn test_pause_resume_consumer_iter() { #[tokio::test] async fn test_consume_partition_order() { - let _r = env_logger::try_init(); + init_test_logger(); + let kafka_context = KafkaContext::shared() + .await + .expect("could not create kafka context"); let topic_name = rand_test_topic("test_consume_partition_order"); - populate_topic(&topic_name, 4, &value_fn, &key_fn, Some(0), None).await; - populate_topic(&topic_name, 4, &value_fn, &key_fn, Some(1), None).await; - populate_topic(&topic_name, 4, &value_fn, &key_fn, Some(2), None).await; + let admin_client = admin::create_admin_client(&kafka_context.bootstrap_servers) + .await + .expect("Could not create admin client"); + admin_client + .create_topics( + &admin::new_topic_vec(&topic_name, Some(3)), + &AdminOptions::default(), + ) + .await + .expect("could not create topic"); + let producer = 
producer::future_producer::create_producer(&kafka_context.bootstrap_servers) + .await + .expect("Could not create Future producer"); + produce_messages_to_partition(&producer, &topic_name, 4, 0).await; + produce_messages_to_partition(&producer, &topic_name, 4, 1).await; + produce_messages_to_partition(&producer, &topic_name, 4, 2).await; // Using partition queues should allow us to consume the partitions // in a round-robin fashion. { - let consumer = Arc::new(create_base_consumer(&rand_test_group(), None)); + let group_id = rand_test_group(); + let consumer = Arc::new( + utils::consumer::create_base_consumer( + &kafka_context.bootstrap_servers, + &group_id, + None, + ) + .expect("could not create base consumer"), + ); let mut tpl = TopicPartitionList::new(); tpl.add_partition_offset(&topic_name, 0, Offset::Beginning) .unwrap(); @@ -273,7 +365,15 @@ async fn test_consume_partition_order() { // When not all partitions have been split into separate queues, the // unsplit partitions should still be accessible via the main queue. { - let consumer = Arc::new(create_base_consumer(&rand_test_group(), None)); + let group_id = rand_test_group(); + let consumer = Arc::new( + utils::consumer::create_base_consumer( + &kafka_context.bootstrap_servers, + &group_id, + None, + ) + .expect("could not create base consumer"), + ); let mut tpl = TopicPartitionList::new(); tpl.add_partition_offset(&topic_name, 0, Offset::Beginning) .unwrap(); @@ -333,7 +433,15 @@ async fn test_consume_partition_order() { // should be continuously polled to serve callbacks, but it should not panic // or result in memory unsafety, etc. { - let consumer = Arc::new(create_base_consumer(&rand_test_group(), None)); + let group_id = rand_test_group(); + let consumer = Arc::new( + utils::consumer::create_base_consumer( + &kafka_context.bootstrap_servers, + &group_id, + None, + ) + .expect("could not create base consumer"), + ); let mut tpl = TopicPartitionList::new(); tpl.add_partition_offset(&topic_name, 0, Offset::Beginning) .unwrap(); @@ -355,15 +463,28 @@ async fn test_consume_partition_order() { #[tokio::test] async fn test_produce_consume_message_queue_nonempty_callback() { - let _r = env_logger::try_init(); + init_test_logger(); + let kafka_context = KafkaContext::shared() + .await + .expect("could not create kafka context"); let topic_name = rand_test_topic("test_produce_consume_message_queue_nonempty_callback"); - create_topic(&topic_name, 1).await; + let admin_client = admin::create_admin_client(&kafka_context.bootstrap_servers) + .await + .expect("Could not create admin client"); + admin_client + .create_topics( + &admin::new_topic_vec(&topic_name, Some(1)), + &AdminOptions::default(), + ) + .await + .expect("could not create topic"); - let consumer: BaseConsumer<_> = consumer_config(&rand_test_group(), None) - .create_with_context(ConsumerTestContext { _n: 64 }) - .expect("Consumer creation failed"); + let group_id = rand_test_group(); + let consumer = + utils::consumer::create_base_consumer(&kafka_context.bootstrap_servers, &group_id, None) + .expect("could not create base consumer"); let consumer = Arc::new(consumer); let mut tpl = TopicPartitionList::new(); @@ -409,7 +530,10 @@ async fn test_produce_consume_message_queue_nonempty_callback() { assert!(queue.poll(Duration::from_secs(0)).is_none()); // Populate the topic, and expect a wakeup notifying us of the new messages. 
- populate_topic(&topic_name, 2, &value_fn, &key_fn, None, None).await; + let producer = producer::future_producer::create_producer(&kafka_context.bootstrap_servers) + .await + .expect("Could not create Future producer"); + produce_messages(&producer, &topic_name, 2, None, None).await; wait_for_wakeups(1); // Read one of the messages. @@ -417,7 +541,7 @@ async fn test_produce_consume_message_queue_nonempty_callback() { // Add more messages to the topic. Expect no additional wakeups, as the // queue is not fully drained, for 1s. - populate_topic(&topic_name, 2, &value_fn, &key_fn, None, None).await; + produce_messages(&producer, &topic_name, 2, None, None).await; thread::sleep(Duration::from_secs(1)); assert_eq!(wakeups.load(Ordering::SeqCst), 1); @@ -431,7 +555,7 @@ async fn test_produce_consume_message_queue_nonempty_callback() { assert_eq!(wakeups.load(Ordering::SeqCst), 1); // Add another message, and expect a wakeup. - populate_topic(&topic_name, 1, &value_fn, &key_fn, None, None).await; + produce_messages(&producer, &topic_name, 1, None, None).await; wait_for_wakeups(2); // Expect no additional wakeups for 1s. @@ -440,7 +564,7 @@ async fn test_produce_consume_message_queue_nonempty_callback() { // Disable the queue and add another message. queue.set_nonempty_callback(|| ()); - populate_topic(&topic_name, 1, &value_fn, &key_fn, None, None).await; + produce_messages(&producer, &topic_name, 1, None, None).await; // Expect no additional wakeups for 1s. thread::sleep(Duration::from_secs(1)); diff --git a/tests/test_low_producers.rs b/tests/base_producer.rs similarity index 65% rename from tests/test_low_producers.rs rename to tests/base_producer.rs index ac12b8e04..c9e9061b5 100644 --- a/tests/test_low_producers.rs +++ b/tests/base_producer.rs @@ -1,6 +1,6 @@ //! Test data production using low level producers. 
-use std::collections::{HashMap, HashSet};
+use std::collections::HashSet;
 use std::error::Error;
 use std::ffi::CString;
 use std::sync::Arc;
@@ -8,9 +8,7 @@ use std::sync::Mutex;
 use std::thread;
 use std::time::Duration;
 
-use maplit::hashmap;
-
-use rdkafka::config::ClientConfig;
+use rdkafka::admin::AdminOptions;
 use rdkafka::error::{KafkaError, RDKafkaErrorCode};
 use rdkafka::message::{Header, Headers, Message, OwnedHeaders, OwnedMessage};
 use rdkafka::producer::{
@@ -21,7 +19,11 @@ use rdkafka::types::RDKafkaRespErr;
 use rdkafka::util::current_time_millis;
 use rdkafka::{ClientContext, Statistics};
 
-use crate::utils::*;
+use crate::utils::admin;
+use crate::utils::containers::KafkaContext;
+use crate::utils::logging::init_test_logger;
+use crate::utils::producer::base_producer as base_producer_utils;
+use crate::utils::rand::*;
 
 mod utils;
 
@@ -142,59 +144,33 @@ impl Partitioner for PanicPartitioner {
     }
 }
 
-fn default_config(config_overrides: HashMap<&str, &str>) -> ClientConfig {
-    let mut config = ClientConfig::new();
-    config
-        .set("bootstrap.servers", get_bootstrap_server())
-        .set("message.timeout.ms", "5000");
-
-    for (key, value) in config_overrides {
-        config.set(key, value);
-    }
-    config
-}
-
-fn base_producer(config_overrides: HashMap<&str, &str>) -> BaseProducer<PrintingContext> {
-    base_producer_with_context(PrintingContext { _n: 123 }, config_overrides)
-}
-
-fn base_producer_with_context<Part: Partitioner, C: ProducerContext<Part>>(
-    context: C,
-    config_overrides: HashMap<&str, &str>,
-) -> BaseProducer<C, Part> {
-    configure_logging_for_tests();
-    default_config(config_overrides)
-        .create_with_context::<C, BaseProducer<C, Part>>(context)
-        .unwrap()
-}
-
-#[allow(dead_code)]
-fn threaded_producer(
-    config_overrides: HashMap<&str, &str>,
-) -> ThreadedProducer<PrintingContext> {
-    threaded_producer_with_context(PrintingContext { _n: 123 }, config_overrides)
-}
-
-fn threaded_producer_with_context<Part, C>(
-    context: C,
-    config_overrides: HashMap<&str, &str>,
-) -> ThreadedProducer<C, Part>
-where
-    Part: Partitioner + Send + Sync + 'static,
-    C: ProducerContext<Part>,
-{
-    configure_logging_for_tests();
-    default_config(config_overrides)
-        .create_with_context::<C, ThreadedProducer<C, Part>>(context)
-        .unwrap()
-}
-
 // TESTS
 
-#[test]
-fn test_base_producer_queue_full() {
-    let producer = base_producer(hashmap! { "queue.buffering.max.messages" => "10" });
+#[tokio::test(flavor = "multi_thread")]
+async fn test_base_producer_queue_full() {
+    init_test_logger();
+
+    let kafka_context = KafkaContext::shared()
+        .await
+        .expect("could not create kafka context");
     let topic_name = rand_test_topic("test_base_producer_queue_full");
+    let admin_client = admin::create_admin_client(&kafka_context.bootstrap_servers)
+        .await
+        .expect("Could not create admin client");
+    admin_client
+        .create_topics(
+            &admin::new_topic_vec(&topic_name, Some(1)),
+            &AdminOptions::default(),
+        )
+        .await
+        .expect("could not create topic");
+
+    let producer = base_producer_utils::create_base_producer_with_context(
+        &kafka_context.bootstrap_servers,
+        PrintingContext { _n: 123 },
+        &[("queue.buffering.max.messages", "10")],
+    )
+    .expect("failed to create base producer");
 
     let results = (0..30)
         .map(|id| {
@@ -230,19 +206,34 @@
     assert_eq!(errors, 20);
 }
 
-#[test]
-fn test_base_producer_timeout() {
+#[tokio::test(flavor = "multi_thread")]
+async fn test_base_producer_timeout() {
+    init_test_logger();
+
     let context = CollectingContext::new();
-    let bootstrap_server = get_bootstrap_server();
-    let producer = base_producer_with_context(
-        context.clone(),
-        hashmap! {
-            "message.timeout.ms" => "100",
-            "bootstrap.servers" => &bootstrap_server,
-        },
-    );
+    let kafka_context = KafkaContext::shared()
+        .await
+        .expect("could not create kafka context");
     let topic_name = rand_test_topic("test_base_producer_timeout");
+    let admin_client = admin::create_admin_client(&kafka_context.bootstrap_servers)
+        .await
+        .expect("Could not create admin client");
+    admin_client
+        .create_topics(
+            &admin::new_topic_vec(&topic_name, Some(1)),
+            &AdminOptions::default(),
+        )
+        .await
+        .expect("could not create topic");
+
+    let producer = base_producer_utils::create_base_producer_with_context(
+        &kafka_context.bootstrap_servers,
+        context.clone(),
+        &[("message.timeout.ms", "100")],
+    )
+    .expect("failed to create base producer");
+
     let results_count = (0..10)
         .map(|id| {
             producer.send(
@@ -345,14 +336,35 @@ impl ProducerContext for HeaderCheckContext {
     }
 }
 
-#[test]
-fn test_base_producer_headers() {
+#[tokio::test(flavor = "multi_thread")]
+async fn test_base_producer_headers() {
+    init_test_logger();
+
     let ids_set = Arc::new(Mutex::new(HashSet::new()));
     let context = HeaderCheckContext {
         ids: ids_set.clone(),
    };
-    let producer = base_producer_with_context(context, HashMap::new());
     let topic_name = rand_test_topic("test_base_producer_headers");
+    let kafka_context = KafkaContext::shared()
+        .await
+        .expect("could not create kafka context");
+    let admin_client = admin::create_admin_client(&kafka_context.bootstrap_servers)
+        .await
+        .expect("Could not create admin client");
+    admin_client
+        .create_topics(
+            &admin::new_topic_vec(&topic_name, Some(1)),
+            &AdminOptions::default(),
+        )
+        .await
+        .expect("could not create topic");
+
+    let producer = base_producer_utils::create_base_producer_with_context(
+        &kafka_context.bootstrap_servers,
+        context,
+        &[],
+    )
+    .expect("failed to create base producer");
 
     let results_count = (0..10)
         .map(|id| {
@@ -389,11 +401,32 @@
     assert_eq!((*ids_set.lock().unwrap()).len(), 10);
 }
 
-#[test]
-fn test_threaded_producer_send() {
+#[tokio::test(flavor = "multi_thread")]
+async fn test_threaded_producer_send() {
+    init_test_logger();
+
     let context = CollectingContext::new();
-    let producer = threaded_producer_with_context(context.clone(), HashMap::new());
     let topic_name = rand_test_topic("test_threaded_producer_send");
+    let kafka_context = KafkaContext::shared()
+        .await
+        .expect("could not create kafka context");
+    let admin_client = admin::create_admin_client(&kafka_context.bootstrap_servers)
+        .await
+        .expect("Could not create admin client");
+    admin_client
+        .create_topics(
+            &admin::new_topic_vec(&topic_name, Some(1)),
+            &AdminOptions::default(),
+        )
+        .await
+        .expect("could not create topic");
+
+    let producer = base_producer_utils::create_threaded_producer_with_context(
+        &kafka_context.bootstrap_servers,
+        context.clone(),
+        &[],
+    )
+    .expect("failed to create threaded producer");
 
     let results_count = (0..10)
         .map(|id| {
@@ -419,8 +452,10 @@
     }
 }
 
-#[test]
-fn test_base_producer_opaque_arc() -> Result<(), Box<dyn Error>> {
+#[tokio::test(flavor = "multi_thread")]
+async fn test_base_producer_opaque_arc() -> Result<(), Box<dyn Error>> {
+    init_test_logger();
+
     struct OpaqueArcContext {}
     impl ClientContext for OpaqueArcContext {}
     impl ProducerContext for OpaqueArcContext {
@@ -434,8 +469,27 @@ fn test_base_producer_opaque_arc() -> Result<(), Box<dyn Error>> {
     let shared_count = Arc::new(Mutex::new(0));
     let context = OpaqueArcContext {};
-    let producer = base_producer_with_context(context,
HashMap::new()); let topic_name = rand_test_topic("test_base_producer_opaque_arc"); + let kafka_context = KafkaContext::shared() + .await + .expect("could not create kafka context"); + let admin_client = admin::create_admin_client(&kafka_context.bootstrap_servers) + .await + .expect("Could not create admin client"); + admin_client + .create_topics( + &admin::new_topic_vec(&topic_name, Some(1)), + &AdminOptions::default(), + ) + .await + .expect("could not create topic"); + + let producer = base_producer_utils::create_base_producer_with_context( + &kafka_context.bootstrap_servers, + context, + &[], + ) + .expect("failed to create base producer"); let results_count = (0..10) .map(|_| { @@ -452,9 +506,19 @@ fn test_base_producer_opaque_arc() -> Result<(), Box> { Ok(()) } -#[test] -fn test_fatal_errors() { - let producer = base_producer(HashMap::new()); +#[tokio::test(flavor = "multi_thread")] +async fn test_fatal_errors() { + init_test_logger(); + + let kafka_context = KafkaContext::shared() + .await + .expect("could not create kafka context"); + let producer = base_producer_utils::create_base_producer_with_context( + &kafka_context.bootstrap_servers, + PrintingContext { _n: 123 }, + &[], + ) + .expect("failed to create base producer"); assert_eq!(producer.client().fatal_error(), None); @@ -476,23 +540,38 @@ fn test_fatal_errors() { ) } -#[test] -fn test_register_custom_partitioner_linger_non_zero_key_null() { +#[tokio::test(flavor = "multi_thread")] +async fn test_register_custom_partitioner_linger_non_zero_key_null() { // Custom partitioner is not used when sticky.partitioning.linger.ms > 0 and key is null. // https://github.com/confluentinc/librdkafka/blob/081fd972fa97f88a1e6d9a69fc893865ffbb561a/src/rdkafka_msg.c#L1192-L1196 + init_test_logger(); + let context = CollectingContext::new_with_custom_partitioner(PanicPartitioner {}); - let mut config_overrides = HashMap::new(); - config_overrides.insert("sticky.partitioning.linger.ms", "10"); - let producer = base_producer_with_context(context.clone(), config_overrides); + let kafka_context = KafkaContext::shared() + .await + .expect("could not create kafka context"); + let topic_name = rand_test_topic("test_register_custom_partitioner_linger_non_zero_key_null"); + + let admin_client = admin::create_admin_client(&kafka_context.bootstrap_servers) + .await + .expect("Could not create admin client"); + admin_client + .create_topics( + &admin::new_topic_vec(&topic_name, Some(3)), + &AdminOptions::default(), + ) + .await + .expect("could not create topic"); + + let producer = base_producer_utils::create_base_producer_with_context( + &kafka_context.bootstrap_servers, + context.clone(), + &[("sticky.partitioning.linger.ms", "10")], + ) + .expect("failed to create base producer"); producer - .send( - BaseRecord::<(), str, usize>::with_opaque_to( - &rand_test_topic("test_register_custom_partitioner_linger_non_zero_key_null"), - 0, - ) - .payload(""), - ) + .send(BaseRecord::<(), str, usize>::with_opaque_to(&topic_name, 0).payload("")) .unwrap(); producer.flush(Duration::from_secs(10)).unwrap(); @@ -505,11 +584,32 @@ fn test_register_custom_partitioner_linger_non_zero_key_null() { } } -#[test] -fn test_custom_partitioner_base_producer() { +#[tokio::test(flavor = "multi_thread")] +async fn test_custom_partitioner_base_producer() { + init_test_logger(); + let context = CollectingContext::new_with_custom_partitioner(FixedPartitioner::new(2)); - let producer = base_producer_with_context(context.clone(), HashMap::new()); let topic_name = 
rand_test_topic("test_custom_partitioner_base_producer"); + let kafka_context = KafkaContext::shared() + .await + .expect("could not create kafka context"); + let admin_client = admin::create_admin_client(&kafka_context.bootstrap_servers) + .await + .expect("Could not create admin client"); + admin_client + .create_topics( + &admin::new_topic_vec(&topic_name, Some(3)), + &AdminOptions::default(), + ) + .await + .expect("could not create topic"); + + let producer = base_producer_utils::create_base_producer_with_context( + &kafka_context.bootstrap_servers, + context.clone(), + &[], + ) + .expect("failed to create base producer"); let results_count = (0..10) .map(|id| { @@ -533,11 +633,32 @@ fn test_custom_partitioner_base_producer() { } } -#[test] -fn test_custom_partitioner_threaded_producer() { +#[tokio::test(flavor = "multi_thread")] +async fn test_custom_partitioner_threaded_producer() { + init_test_logger(); + let context = CollectingContext::new_with_custom_partitioner(FixedPartitioner::new(2)); - let producer = threaded_producer_with_context(context.clone(), HashMap::new()); let topic_name = rand_test_topic("test_custom_partitioner_threaded_producer"); + let kafka_context = KafkaContext::shared() + .await + .expect("could not create kafka context"); + let admin_client = admin::create_admin_client(&kafka_context.bootstrap_servers) + .await + .expect("Could not create admin client"); + admin_client + .create_topics( + &admin::new_topic_vec(&topic_name, Some(3)), + &AdminOptions::default(), + ) + .await + .expect("could not create topic"); + + let producer = base_producer_utils::create_threaded_producer_with_context( + &kafka_context.bootstrap_servers, + context.clone(), + &[], + ) + .expect("failed to create threaded producer"); let results_count = (0..10) .map(|id| { diff --git a/tests/consumer_groups.rs b/tests/consumer_groups.rs new file mode 100644 index 000000000..71d44c8fd --- /dev/null +++ b/tests/consumer_groups.rs @@ -0,0 +1,141 @@ +use crate::utils::consumer; +use crate::utils::containers::KafkaContext; +use crate::utils::logging::init_test_logger; +use crate::utils::rand::{rand_test_group, rand_test_topic}; +use rdkafka::admin::{AdminOptions, GroupResult, NewTopic, TopicReplication}; +use rdkafka_sys::RDKafkaErrorCode; + +mod utils; + +/// Verify that a valid group can be deleted. +#[tokio::test] +pub async fn test_consumer_groups_deletion() { + init_test_logger(); + + // Get Kafka container context. 
+    let kafka_context = KafkaContext::shared()
+        .await
+        .expect("could not create kafka context");
+
+    // Create admin client
+    let admin_client = utils::admin::create_admin_client(&kafka_context.bootstrap_servers)
+        .await
+        .expect("could not create admin client");
+
+    // Create consumer client
+    let group_name = rand_test_group();
+    let topic_name = rand_test_topic("test_topic");
+    let consumer_client = utils::consumer::create_unsubscribed_base_consumer(
+        &kafka_context.bootstrap_servers,
+        Some(&group_name),
+    )
+    .await
+    .expect("could not create unsubscribed base consumer");
+
+    admin_client
+        .create_topics(
+            &[NewTopic {
+                name: &topic_name,
+                num_partitions: 1,
+                replication: TopicReplication::Fixed(1),
+                config: vec![],
+            }],
+            &AdminOptions::default(),
+        )
+        .await
+        .expect("topic creation failed");
+
+    utils::consumer::create_consumer_group_on_topic(&consumer_client, &topic_name)
+        .await
+        .expect("could not create group");
+    let res = admin_client
+        .delete_groups(&[&group_name], &AdminOptions::default())
+        .await
+        .expect("could not delete groups");
+    assert_eq!(res, [Ok(group_name.to_string())]);
+}
+
+/// Verify that attempting to delete an unknown group returns an error.
+#[tokio::test]
+pub async fn test_delete_unknown_group() {
+    init_test_logger();
+
+    // Get Kafka container context.
+    let kafka_context = KafkaContext::shared()
+        .await
+        .expect("could not create kafka context");
+
+    // Create admin client
+    let admin_client = utils::admin::create_admin_client(&kafka_context.bootstrap_servers)
+        .await
+        .expect("could not create admin client");
+
+    let unknown_group_name = rand_test_group();
+    let res = admin_client
+        .delete_groups(&[&unknown_group_name], &AdminOptions::default())
+        .await;
+    let expected: GroupResult = Err((unknown_group_name, RDKafkaErrorCode::NotCoordinator));
+    assert_eq!(res, Ok(vec![expected]));
+}
+
+/// Verify that deleting a valid and invalid group results in a mixed result
+/// set.
+#[tokio::test]
+pub async fn test_consumer_group_action_mixed_results() {
+    init_test_logger();
+
+    // Get Kafka container context.
+    let kafka_context = KafkaContext::shared()
+        .await
+        .expect("could not create kafka context");
+
+    // Create admin client
+    let admin_client = utils::admin::create_admin_client(&kafka_context.bootstrap_servers)
+        .await
+        .expect("could not create admin client");
+
+    // Create consumer client
+    let group_name = rand_test_group();
+    let topic_name = rand_test_topic("test_topic");
+    let consumer_client = utils::consumer::create_unsubscribed_base_consumer(
+        &kafka_context.bootstrap_servers,
+        Some(&group_name),
+    )
+    .await
+    .expect("could not create unsubscribed base consumer");
+
+    admin_client
+        .create_topics(
+            &[NewTopic {
+                name: &topic_name,
+                num_partitions: 1,
+                replication: TopicReplication::Fixed(1),
+                config: vec![],
+            }],
+            &AdminOptions::default(),
+        )
+        .await
+        .expect("topic creation failed");
+
+    let unknown_group_name = rand_test_group();
+    consumer::create_consumer_group_on_topic(&consumer_client, &topic_name)
+        .await
+        .expect("could not create group");
+    let res = admin_client
+        .delete_groups(
+            &[&group_name, &unknown_group_name],
+            &AdminOptions::default(),
+        )
+        .await;
+    assert_eq!(
+        res,
+        Ok(vec![
+            Ok(group_name.to_string()),
+            Err((
+                unknown_group_name.to_string(),
+                RDKafkaErrorCode::GroupIdNotFound
+            ))
+        ])
+    );
+}
diff --git a/tests/test_high_producers.rs b/tests/future_producer.rs
similarity index 74%
rename from tests/test_high_producers.rs
rename to tests/future_producer.rs
index 9a71c9981..3a0699839 100644
--- a/tests/test_high_producers.rs
+++ b/tests/future_producer.rs
@@ -1,10 +1,10 @@
 //! Test data production using high level producers.
 
-use std::collections::HashMap;
 use std::time::{Duration, Instant};
 
 use futures::stream::{FuturesUnordered, StreamExt};
+use rdkafka::admin::AdminOptions;
 use rdkafka::client::DefaultClientContext;
 use rdkafka::config::ClientConfig;
 use rdkafka::error::{KafkaError, RDKafkaErrorCode};
@@ -13,25 +13,36 @@ use rdkafka::producer::{FutureProducer, FutureRecord, Producer};
 use rdkafka::util::Timeout;
 use rdkafka::Timestamp;
 
-use crate::utils::*;
+use crate::utils::admin;
+use crate::utils::containers::KafkaContext;
+use crate::utils::logging::init_test_logger;
+use crate::utils::producer;
+use crate::utils::rand::*;
 
 mod utils;
 
-fn future_producer(config_overrides: HashMap<&str, &str>) -> FutureProducer {
-    let mut config = ClientConfig::new();
-    config
-        .set("bootstrap.servers", "localhost")
-        .set("message.timeout.ms", "5000");
-    for (key, value) in config_overrides {
-        config.set(key, value);
-    }
-    config.create().expect("Failed to create producer")
-}
-
 #[tokio::test]
 async fn test_future_producer_send() {
-    let producer = future_producer(HashMap::new());
+    init_test_logger();
+
+    let kafka_context = KafkaContext::shared()
+        .await
+        .expect("could not create kafka context");
     let topic_name = rand_test_topic("test_future_producer_send");
+    let admin_client = admin::create_admin_client(&kafka_context.bootstrap_servers)
+        .await
+        .expect("could not create admin client");
+    admin_client
+        .create_topics(
+            &admin::new_topic_vec(&topic_name, Some(3)),
+            &AdminOptions::default(),
+        )
+        .await
+        .expect("could not create topic");
+
+    let producer = producer::future_producer::create_producer(&kafka_context.bootstrap_servers)
+        .await
+        .expect("Could not create Future producer");
 
     let results: FuturesUnordered<_> = (0..10)
        .map(|_| {
@@ -57,11 +68,14 @@ async fn test_future_producer_send_full() {
     // Connect to a nonexistent Kafka broker with a long message timeout and a
     // tiny producer queue, so we can fill up the
queue for a while by sending a // single message. - let mut config = HashMap::new(); - config.insert("bootstrap.servers", ""); - config.insert("message.timeout.ms", "5000"); - config.insert("queue.buffering.max.messages", "1"); - let producer = &future_producer(config); + let mut config = ClientConfig::new(); + config + .set("bootstrap.servers", "") + .set("message.timeout.ms", "5000") + .set("queue.buffering.max.messages", "1"); + let producer: FutureProducer = + config.create().expect("Failed to create producer"); + let producer = &producer; let topic_name = &rand_test_topic("test_future_producer_send_full"); // Fill up the queue. @@ -97,10 +111,29 @@ async fn test_future_producer_send_full() { #[tokio::test] async fn test_future_producer_send_fail() { - let producer = future_producer(HashMap::new()); + init_test_logger(); + + let kafka_context = KafkaContext::shared() + .await + .expect("could not create kafka context"); + let topic_name = rand_test_topic("test_future_producer_send_fail"); + let admin_client = admin::create_admin_client(&kafka_context.bootstrap_servers) + .await + .expect("could not create admin client"); + admin_client + .create_topics( + &admin::new_topic_vec(&topic_name, Some(3)), + &AdminOptions::default(), + ) + .await + .expect("could not create topic"); + + let producer = producer::future_producer::create_producer(&kafka_context.bootstrap_servers) + .await + .expect("Could not create Future producer"); let future = producer.send( - FutureRecord::to("topic") + FutureRecord::to(&topic_name) .payload("payload") .key("key") .partition(100) // Fail @@ -128,7 +161,7 @@ async fn test_future_producer_send_fail() { kafka_error.to_string(), "Message production error: UnknownPartition (Local: Unknown partition)" ); - assert_eq!(owned_message.topic(), "topic"); + assert_eq!(owned_message.topic(), topic_name.as_str()); let headers = owned_message.headers().unwrap(); assert_eq!(headers.count(), 3); assert_eq!( diff --git a/tests/test_metadata.rs b/tests/metadata.rs similarity index 59% rename from tests/test_metadata.rs rename to tests/metadata.rs index eab0731b5..242755dfa 100644 --- a/tests/test_metadata.rs +++ b/tests/metadata.rs @@ -2,6 +2,7 @@ use std::time::Duration; +use rdkafka::admin::AdminOptions; use rdkafka::config::ClientConfig; use rdkafka::consumer::{Consumer, StreamConsumer}; use rdkafka::error::KafkaError; @@ -9,32 +10,57 @@ use rdkafka::topic_partition_list::TopicPartitionList; use rdkafka_sys::types::RDKafkaConfRes; +use crate::utils::admin; +use crate::utils::containers::KafkaContext; +use crate::utils::logging::init_test_logger; +use crate::utils::producer; +use crate::utils::rand::*; use crate::utils::*; mod utils; -fn create_consumer(group_id: &str) -> StreamConsumer { - ClientConfig::new() - .set("group.id", group_id) - .set("enable.partition.eof", "true") - .set("client.id", "rdkafka_integration_test_client") - .set("bootstrap.servers", get_bootstrap_server().as_str()) - .set("session.timeout.ms", "6000") - .set("debug", "all") - .set("auto.offset.reset", "earliest") - .create() - .expect("Failed to create StreamConsumer") +async fn create_consumer(kafka_context: &KafkaContext, group_id: &str) -> StreamConsumer { + utils::consumer::stream_consumer::create_stream_consumer_with_options( + &kafka_context.bootstrap_servers, + group_id, + &[ + ("enable.partition.eof", "true"), + ("client.id", "rdkafka_integration_test_client"), + ("session.timeout.ms", "6000"), + ("debug", "all"), + ], + ) + .await + .expect("Failed to create StreamConsumer") } 
#[tokio::test] async fn test_metadata() { - let _r = env_logger::try_init(); + init_test_logger(); + let kafka_context = KafkaContext::shared() + .await + .expect("could not create kafka context"); let topic_name = rand_test_topic("test_metadata"); - populate_topic(&topic_name, 1, &value_fn, &key_fn, Some(0), None).await; - populate_topic(&topic_name, 1, &value_fn, &key_fn, Some(1), None).await; - populate_topic(&topic_name, 1, &value_fn, &key_fn, Some(2), None).await; - let consumer = create_consumer(&rand_test_group()); + let admin_client = admin::create_admin_client(&kafka_context.bootstrap_servers) + .await + .expect("Could not create admin client"); + admin_client + .create_topics( + &admin::new_topic_vec(&topic_name, Some(3)), + &AdminOptions::default(), + ) + .await + .expect("could not create topic"); + + let producer = producer::future_producer::create_producer(&kafka_context.bootstrap_servers) + .await + .expect("Could not create Future producer"); + produce_messages_to_partition(&producer, &topic_name, 1, 0).await; + produce_messages_to_partition(&producer, &topic_name, 1, 1).await; + produce_messages_to_partition(&producer, &topic_name, 1, 2).await; + + let consumer = create_consumer(&kafka_context, &rand_test_group()).await; let metadata = consumer .fetch_metadata(None, Duration::from_secs(5)) @@ -89,11 +115,27 @@ async fn test_metadata() { #[tokio::test] async fn test_subscription() { - let _r = env_logger::try_init(); + init_test_logger(); + let kafka_context = KafkaContext::shared() + .await + .expect("could not create kafka context"); let topic_name = rand_test_topic("test_subscription"); - populate_topic(&topic_name, 10, &value_fn, &key_fn, None, None).await; - let consumer = create_consumer(&rand_test_group()); + let admin_client = admin::create_admin_client(&kafka_context.bootstrap_servers) + .await + .expect("Could not create admin client"); + admin_client + .create_topics( + &admin::new_topic_vec(&topic_name, Some(1)), + &AdminOptions::default(), + ) + .await + .expect("could not create topic"); + let producer = producer::future_producer::create_producer(&kafka_context.bootstrap_servers) + .await + .expect("Could not create Future producer"); + produce_messages(&producer, &topic_name, 10, None, None).await; + let consumer = create_consumer(&kafka_context, &rand_test_group()).await; consumer.subscribe(&[topic_name.as_str()]).unwrap(); // Make sure the consumer joins the group. 
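Waiting for the group join is done by pulling a single item off the stream before any assertions run; a sketch of the idiom (the commit-metadata test later in this diff uses it verbatim):

    // The first message (or error) implies the rebalance has completed.
    let _ = consumer.stream().next().await;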
@@ -106,14 +148,32 @@ async fn test_subscription() { #[tokio::test] async fn test_group_membership() { - let _r = env_logger::try_init(); + init_test_logger(); + let kafka_context = KafkaContext::shared() + .await + .expect("could not create kafka context"); let topic_name = rand_test_topic("test_group_membership"); let group_name = rand_test_group(); - populate_topic(&topic_name, 1, &value_fn, &key_fn, Some(0), None).await; - populate_topic(&topic_name, 1, &value_fn, &key_fn, Some(1), None).await; - populate_topic(&topic_name, 1, &value_fn, &key_fn, Some(2), None).await; - let consumer = create_consumer(&group_name); + let admin_client = admin::create_admin_client(&kafka_context.bootstrap_servers) + .await + .expect("Could not create admin client"); + admin_client + .create_topics( + &admin::new_topic_vec(&topic_name, Some(3)), + &AdminOptions::default(), + ) + .await + .expect("could not create topic"); + + let producer = producer::future_producer::create_producer(&kafka_context.bootstrap_servers) + .await + .expect("Could not create Future producer"); + produce_messages_to_partition(&producer, &topic_name, 1, 0).await; + produce_messages_to_partition(&producer, &topic_name, 1, 1).await; + produce_messages_to_partition(&producer, &topic_name, 1, 2).await; + + let consumer = create_consumer(&kafka_context, &group_name).await; consumer.subscribe(&[topic_name.as_str()]).unwrap(); // Make sure the consumer joins the group. diff --git a/tests/producer.rs b/tests/producer.rs new file mode 100644 index 000000000..b41a9d394 --- /dev/null +++ b/tests/producer.rs @@ -0,0 +1,87 @@ +use crate::utils::admin::{create_admin_client, create_topic}; +use crate::utils::consumer::{create_subscribed_base_consumer, poll_x_times_for_messages}; +use crate::utils::containers::KafkaContext; +use crate::utils::logging::init_test_logger; +use crate::utils::producer::base_producer::create_producer; +use crate::utils::rand::rand_test_topic; +use rdkafka::producer::BaseRecord; +use rdkafka::Message; + +#[path = "utils/mod.rs"] +mod utils; + +#[tokio::test] +pub async fn test_basic_produce() { + init_test_logger(); + + let kafka_context_result = KafkaContext::shared().await; + let Ok(kafka_context) = kafka_context_result else { + panic!( + "could not create kafka context: {}", + kafka_context_result.unwrap_err() + ); + }; + let test_topic_name = rand_test_topic("testing-topic"); + + let admin_client_result = create_admin_client(&kafka_context.bootstrap_servers).await; + let Ok(admin_client) = admin_client_result else { + panic!( + "could not create admin client: {}", + admin_client_result.unwrap_err() + ); + }; + let create_topic_result = create_topic(&admin_client, &test_topic_name).await; + if create_topic_result.is_err() { + panic!( + "could not create topic: {}", + create_topic_result.unwrap_err() + ); + } + + let consumer_result = + create_subscribed_base_consumer(&kafka_context.bootstrap_servers, None, &test_topic_name) + .await; + let Ok(consumer) = consumer_result else { + panic!( + "could not create consumer: {}", + consumer_result.unwrap_err() + ); + }; + + let create_producer_result = create_producer(&kafka_context.bootstrap_servers).await; + let Ok(base_producer) = create_producer_result else { + panic!( + "could not create base producer: {}", + create_producer_result.unwrap_err() + ); + }; + + let record = BaseRecord::to(&test_topic_name) // destination topic + .key(&[1, 2, 3, 4]) // message key + .payload("content"); // message payload + let send_record_result = + 
crate::utils::producer::base_producer::send_record(&base_producer, record).await; + if send_record_result.is_err() { + panic!("could not send record: {}", send_record_result.unwrap_err()); + } + + let messages_result = poll_x_times_for_messages(&consumer, 10).await; + let Ok(messages) = messages_result else { + panic!("could not get messages from consumer"); + }; + if messages.len() != 1 { + panic!("expected exactly one message"); + } + let borrowed_next_message = messages.get(0).unwrap(); + + let owned_next_message = borrowed_next_message.detach(); + let Some(message_payload) = owned_next_message.payload() else { + panic!("message payload is empty"); + }; + let message_string_result = String::from_utf8(message_payload.to_vec()); + let Ok(message_string) = message_string_result else { + panic!("message payload is not valid UTF-8"); + }; + + assert!(message_string.contains("content")); +} diff --git a/tests/test_high_consumers.rs b/tests/stream_consumers.rs similarity index 57% rename from tests/test_high_consumers.rs rename to tests/stream_consumers.rs index b22dc0b2b..c5030734e 100644 --- a/tests/test_high_consumers.rs +++ b/tests/stream_consumers.rs @@ -1,15 +1,16 @@ //! Test data consumption using high level consumers. -use std::collections::HashMap; use std::error::Error; use std::sync::Arc; +use anyhow::Context; use futures::future; use futures::stream::StreamExt; use maplit::hashmap; use rdkafka_sys::RDKafkaErrorCode; use tokio::time::{self, Duration}; +use rdkafka::admin::AdminOptions; use rdkafka::consumer::{CommitMode, Consumer, ConsumerContext, StreamConsumer}; use rdkafka::error::KafkaError; use rdkafka::topic_partition_list::{Offset, TopicPartitionList}; @@ -17,36 +18,18 @@ use rdkafka::util::current_time_millis; use rdkafka::{Message, Timestamp}; use rdkafka_sys::types::RDKafkaConfRes; +use crate::utils::admin::new_topic_vec; +use crate::utils::containers::KafkaContext; +use crate::utils::logging::init_test_logger; +use crate::utils::rand::*; use crate::utils::*; mod utils; -// Create stream consumer for tests -fn create_stream_consumer( - group_id: &str, - config_overrides: Option>, -) -> StreamConsumer { - let cons_context = ConsumerTestContext { _n: 64 }; - create_stream_consumer_with_context(group_id, config_overrides, cons_context) -} - -fn create_stream_consumer_with_context( - group_id: &str, - config_overrides: Option>, - context: C, -) -> StreamConsumer -where - C: ConsumerContext + 'static, -{ - consumer_config(group_id, config_overrides) - .create_with_context(context) - .expect("Consumer creation failed") -} - #[tokio::test] async fn test_invalid_max_poll_interval() { let res: Result = consumer_config( - &rand_test_group(), + &crate::utils::rand::rand_test_group(), Some(hashmap! { "max.poll.interval.ms" => "-1" }), ) .create(); @@ -70,27 +53,52 @@ async fn test_invalid_max_poll_interval() { // All produced messages should be consumed. #[tokio::test(flavor = "multi_thread")] async fn test_produce_consume_base() { - let _r = env_logger::try_init(); + init_test_logger(); + + // Get Kafka container context. 
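// Note: KafkaContext::shared() (defined in tests/utils/containers.rs at the
// end of this diff) starts a single Kafka testcontainer on first use and
// memoizes it in a tokio OnceCell, so every test shares one broker. Roughly:
//
//     static INSTANCE: OnceCell<Arc<KafkaContext>> = OnceCell::const_new();
//     INSTANCE.get_or_try_init(init).await.map(Arc::clone)
//
// (init, the container-bootstrap helper, is truncated in this diff.)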
+ let kafka_context = KafkaContext::shared() + .await + .expect("could not create kafka context"); + + let producer = producer::future_producer::create_producer(&kafka_context.bootstrap_servers) + .await + .expect("Could not create Future producer"); + + let num_of_messages_to_send = 100usize; let start_time = current_time_millis(); let topic_name = rand_test_topic("test_produce_consume_base"); - let message_map = populate_topic(&topic_name, 100, &value_fn, &key_fn, None, None).await; - let consumer = create_stream_consumer(&rand_test_group(), None); - consumer.subscribe(&[topic_name.as_str()]).unwrap(); + let message_map = topics::populate_topic_using_future_producer( + &producer, + &topic_name, + num_of_messages_to_send, + None, + ) + .await + .expect("Could not populate topic using Future producer"); + // let message_map = populate_topic(&topic_name, 100, &value_fn, &key_fn, None, None).await; + let consumer = utils::consumer::stream_consumer::create_stream_consumer( + &kafka_context.bootstrap_servers, + Some(&rand_test_group()), + ) + .await + .expect("could not create stream consumer"); + consumer + .subscribe(&[topic_name.as_str()]) + .expect("could not subscribe to kafka topic"); - let _consumer_future = consumer + consumer .stream() - .take(100) + .take(num_of_messages_to_send) .for_each(|message| { match message { Ok(m) => { let id = message_map[&(m.partition(), m.offset())]; match m.timestamp() { Timestamp::CreateTime(timestamp) => assert!(timestamp >= start_time), - _ => panic!("Expected createtime for message timestamp"), + _ => panic!("Expected create time for message timestamp"), }; - assert_eq!(m.payload_view::<str>().unwrap().unwrap(), value_fn(id)); - assert_eq!(m.key_view::<str>().unwrap().unwrap(), key_fn(id)); + assert_eq!(m.payload_view::<str>().unwrap().unwrap(), id.to_string()); + assert_eq!(m.key_view::<str>().unwrap().unwrap(), id.to_string()); assert_eq!(m.topic(), topic_name.as_str()); } Err(e) => panic!("Error receiving message: {:?}", e), @@ -106,13 +114,39 @@ async fn test_produce_consume_base() { /// waker slot. #[tokio::test(flavor = "multi_thread")] async fn test_produce_consume_base_concurrent() { - let _r = env_logger::try_init(); + init_test_logger(); - let topic_name = rand_test_topic("test_produce_consume_base_concurrent"); - populate_topic(&topic_name, 100, &value_fn, &key_fn, None, None).await; + // Get Kafka container context.
+ let kafka_context = KafkaContext::shared() + .await + .expect("could not create kafka context"); - let consumer = Arc::new(create_stream_consumer(&rand_test_group(), None)); - consumer.subscribe(&[topic_name.as_str()]).unwrap(); + let producer = producer::future_producer::create_producer(&kafka_context.bootstrap_servers) + .await + .expect("Could not create Future producer"); + + let num_of_messages_to_send = 100usize; + let topic_name = rand_test_topic("test_produce_consume_base_concurrent"); + topics::populate_topic_using_future_producer( + &producer, + &topic_name, + num_of_messages_to_send, + None, + ) + .await + .expect("Could not populate topic using Future producer"); + // let message_map = populate_topic(&topic_name, 100, &value_fn, &key_fn, None, None).await; + let consumer = Arc::new( + consumer::stream_consumer::create_stream_consumer( + &kafka_context.bootstrap_servers, + Some(&rand_test_group()), + ) + .await + .expect("could not create stream consumer"), + ); + consumer + .subscribe(&[topic_name.as_str()]) + .expect("could not subscribe to kafka topic"); let mk_task = || { let consumer = consumer.clone(); @@ -136,13 +170,61 @@ async fn test_produce_consume_base_concurrent() { // All produced messages should be consumed. #[tokio::test(flavor = "multi_thread")] async fn test_produce_consume_base_assign() { - let _r = env_logger::try_init(); + init_test_logger(); + + // Get Kafka container context. + let kafka_context = KafkaContext::shared() + .await + .expect("could not create kafka context"); let topic_name = rand_test_topic("test_produce_consume_base_assign"); - populate_topic(&topic_name, 10, &value_fn, &key_fn, Some(0), None).await; - populate_topic(&topic_name, 10, &value_fn, &key_fn, Some(1), None).await; - populate_topic(&topic_name, 10, &value_fn, &key_fn, Some(2), None).await; - let consumer = create_stream_consumer(&rand_test_group(), None); + let admin_client = admin::create_admin_client(&kafka_context.bootstrap_servers) + .await + .expect("Could not create admin client"); + admin_client + .create_topics( + &new_topic_vec(&topic_name, Some(3)), + &AdminOptions::default(), + ) + .await + .expect("could not create topics"); + + let producer = producer::future_producer::create_producer(&kafka_context.bootstrap_servers) + .await + .expect("Could not create Future producer"); + + let num_of_messages_to_send = 10usize; + topics::populate_topic_using_future_producer( + &producer, + &topic_name, + num_of_messages_to_send, + Some(0), + ) + .await + .expect("Could not populate topic using Future producer"); + topics::populate_topic_using_future_producer( + &producer, + &topic_name, + num_of_messages_to_send, + Some(1), + ) + .await + .expect("Could not populate topic using Future producer"); + topics::populate_topic_using_future_producer( + &producer, + &topic_name, + num_of_messages_to_send, + Some(2), + ) + .await + .expect("Could not populate topic using Future producer"); + + let consumer = utils::consumer::stream_consumer::create_stream_consumer( + &kafka_context.bootstrap_servers, + Some(&rand_test_group()), + ) + .await + .expect("could not create stream consumer"); let mut tpl = TopicPartitionList::new(); tpl.add_partition_offset(&topic_name, 0, Offset::Beginning) .unwrap(); @@ -171,13 +253,62 @@ async fn test_produce_consume_base_assign() { #[tokio::test(flavor = "multi_thread")] async fn test_produce_consume_base_unassign() { - let _r = env_logger::try_init(); + init_test_logger(); + + // Get Kafka container context. 
+ let kafka_context = KafkaContext::shared() + .await + .expect("could not create kafka context"); + + let topic_name = rand_test_topic("test_produce_consume_base_assign"); + let admin_client = admin::create_admin_client(&kafka_context.bootstrap_servers) + .await + .expect("Could not create admin client"); + admin_client + .create_topics( + &new_topic_vec(&topic_name, Some(3)), + &AdminOptions::default(), + ) + .await + .expect("could not create topics"); + + let producer = producer::future_producer::create_producer(&kafka_context.bootstrap_servers) + .await + .expect("Could not create Future producer"); + + let consumer = utils::consumer::stream_consumer::create_stream_consumer( + &kafka_context.bootstrap_servers, + Some(&rand_test_group()), + ) + .await + .expect("could not create stream consumer"); + + let num_of_messages_to_send = 10usize; + topics::populate_topic_using_future_producer( + &producer, + &topic_name, + num_of_messages_to_send, + Some(0), + ) + .await + .expect("Could not populate topic using Future producer"); + topics::populate_topic_using_future_producer( + &producer, + &topic_name, + num_of_messages_to_send, + Some(1), + ) + .await + .expect("Could not populate topic using Future producer"); + topics::populate_topic_using_future_producer( + &producer, + &topic_name, + num_of_messages_to_send, + Some(2), + ) + .await + .expect("Could not populate topic using Future producer"); - let topic_name = rand_test_topic("test_produce_consume_base_unassign"); - populate_topic(&topic_name, 10, &value_fn, &key_fn, Some(0), None).await; - populate_topic(&topic_name, 10, &value_fn, &key_fn, Some(1), None).await; - populate_topic(&topic_name, 10, &value_fn, &key_fn, Some(2), None).await; - let consumer = create_stream_consumer(&rand_test_group(), None); let mut tpl = TopicPartitionList::new(); tpl.add_partition_offset(&topic_name, 0, Offset::Beginning) .unwrap(); @@ -196,13 +327,61 @@ async fn test_produce_consume_base_unassign() { #[tokio::test(flavor = "multi_thread")] async fn test_produce_consume_base_incremental_assign_and_unassign() { - let _r = env_logger::try_init(); + init_test_logger(); + + // Get Kafka container context. 
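// Note: incremental_assign / incremental_unassign (exercised below) modify
// the consumer's current assignment set instead of replacing it the way
// assign()/unassign() do. A sketch, assuming rdkafka's Consumer trait
// methods of the same names:
//
//     consumer.incremental_assign(&tpl)?;   // add the partitions in `tpl`
//     consumer.incremental_unassign(&tpl)?; // remove them again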
+ let kafka_context = KafkaContext::shared() + .await + .expect("could not create kafka context"); let topic_name = rand_test_topic("test_produce_consume_base_incremental_assign_and_unassign"); - populate_topic(&topic_name, 10, &value_fn, &key_fn, Some(0), None).await; - populate_topic(&topic_name, 10, &value_fn, &key_fn, Some(1), None).await; - populate_topic(&topic_name, 10, &value_fn, &key_fn, Some(2), None).await; - let consumer = create_stream_consumer(&rand_test_group(), None); + let admin_client = admin::create_admin_client(&kafka_context.bootstrap_servers) + .await + .expect("Could not create admin client"); + admin_client + .create_topics( + &new_topic_vec(&topic_name, Some(3)), + &AdminOptions::default(), + ) + .await + .expect("could not create topics"); + + let producer = producer::future_producer::create_producer(&kafka_context.bootstrap_servers) + .await + .expect("Could not create Future producer"); + + let consumer = utils::consumer::stream_consumer::create_stream_consumer( + &kafka_context.bootstrap_servers, + Some(&rand_test_group()), + ) + .await + .expect("could not create stream consumer"); + + let num_of_messages_to_send = 10usize; + topics::populate_topic_using_future_producer( + &producer, + &topic_name, + num_of_messages_to_send, + Some(0), + ) + .await + .expect("Could not populate topic using Future producer"); + topics::populate_topic_using_future_producer( + &producer, + &topic_name, + num_of_messages_to_send, + Some(1), + ) + .await + .expect("Could not populate topic using Future producer"); + topics::populate_topic_using_future_producer( + &producer, + &topic_name, + num_of_messages_to_send, + Some(2), + ) + .await + .expect("Could not populate topic using Future producer"); // Adding a simple partition let mut tpl = TopicPartitionList::new(); @@ -237,12 +416,25 @@ async fn test_produce_consume_base_incremental_assign_and_unassign() { // All produced messages should be consumed. 
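The produce_messages_with_timestamp helper used by the next test is not shown in this diff; a plausible sketch, assuming it stamps each FutureRecord with the given create-time before sending:

    let record = FutureRecord::to(topic_name)
        .payload(&value)
        .key(&key)
        .partition(partition)
        .timestamp(timestamp); // surfaces as Timestamp::CreateTime(timestamp) on the consumer side
    producer
        .send(record, Duration::from_secs(5))
        .await
        .expect("failed to produce message");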
#[tokio::test(flavor = "multi_thread")] async fn test_produce_consume_with_timestamp() { - let _r = env_logger::try_init(); + init_test_logger(); + let kafka_context = KafkaContext::shared() + .await + .expect("could not create kafka context"); let topic_name = rand_test_topic("test_produce_consume_with_timestamp"); - let message_map = - populate_topic(&topic_name, 100, &value_fn, &key_fn, Some(0), Some(1111)).await; - let consumer = create_stream_consumer(&rand_test_group(), None); + + let producer = producer::future_producer::create_producer(&kafka_context.bootstrap_servers) + .await + .expect("Could not create Future producer"); + + let message_map = produce_messages_with_timestamp(&producer, &topic_name, 100, 0, 1111).await; + + let consumer = utils::consumer::stream_consumer::create_stream_consumer( + &kafka_context.bootstrap_servers, + Some(&rand_test_group()), + ) + .await + .expect("could not create stream consumer"); consumer.subscribe(&[topic_name.as_str()]).unwrap(); let _consumer_future = consumer @@ -262,7 +454,7 @@ async fn test_produce_consume_with_timestamp() { }) .await; - populate_topic(&topic_name, 10, &value_fn, &key_fn, Some(0), Some(999_999)).await; + let _ = produce_messages_with_timestamp(&producer, &topic_name, 10, 0, 999_999).await; // Lookup the offsets let tpl = consumer @@ -278,13 +470,39 @@ async fn test_produce_consume_with_timestamp() { // TODO: add check that commit cb gets called correctly #[tokio::test(flavor = "multi_thread")] async fn test_consumer_commit_message() { - let _r = env_logger::try_init(); + init_test_logger(); + let kafka_context = KafkaContext::shared() + .await + .expect("could not create kafka context"); let topic_name = rand_test_topic("test_consumer_commit_message"); - populate_topic(&topic_name, 10, &value_fn, &key_fn, Some(0), None).await; - populate_topic(&topic_name, 11, &value_fn, &key_fn, Some(1), None).await; - populate_topic(&topic_name, 12, &value_fn, &key_fn, Some(2), None).await; - let consumer = create_stream_consumer(&rand_test_group(), None); + let admin_client = admin::create_admin_client(&kafka_context.bootstrap_servers) + .await + .expect("Could not create admin client"); + admin_client + .create_topics( + &new_topic_vec(&topic_name, Some(3)), + &AdminOptions::default(), + ) + .await + .expect("could not create topics"); + + let producer = producer::future_producer::create_producer(&kafka_context.bootstrap_servers) + .await + .expect("Could not create Future producer"); + + let _ = produce_messages_to_partition(&producer, &topic_name, 10, 0).await; + let _ = produce_messages_to_partition(&producer, &topic_name, 11, 1).await; + let _ = produce_messages_to_partition(&producer, &topic_name, 12, 2).await; + + let group_name = rand_test_group(); + let consumer = utils::consumer::stream_consumer::create_stream_consumer_with_options( + &kafka_context.bootstrap_servers, + &group_name, + &[], + ) + .await + .expect("could not create stream consumer"); consumer.subscribe(&[topic_name.as_str()]).unwrap(); let _consumer_future = consumer @@ -356,16 +574,42 @@ async fn test_consumer_commit_message() { #[tokio::test(flavor = "multi_thread")] async fn test_consumer_store_offset_commit() { - let _r = env_logger::try_init(); + init_test_logger(); + let kafka_context = KafkaContext::shared() + .await + .expect("could not create kafka context"); let topic_name = rand_test_topic("test_consumer_store_offset_commit"); - populate_topic(&topic_name, 10, &value_fn, &key_fn, Some(0), None).await; - populate_topic(&topic_name, 11, &value_fn, 
&key_fn, Some(1), None).await; - populate_topic(&topic_name, 12, &value_fn, &key_fn, Some(2), None).await; - let mut config = HashMap::new(); - config.insert("enable.auto.offset.store", "false"); - config.insert("enable.partition.eof", "true"); - let consumer = create_stream_consumer(&rand_test_group(), Some(config)); + let admin_client = admin::create_admin_client(&kafka_context.bootstrap_servers) + .await + .expect("Could not create admin client"); + admin_client + .create_topics( + &new_topic_vec(&topic_name, Some(3)), + &AdminOptions::default(), + ) + .await + .expect("could not create topics"); + + let producer = producer::future_producer::create_producer(&kafka_context.bootstrap_servers) + .await + .expect("Could not create Future producer"); + + let _ = produce_messages_to_partition(&producer, &topic_name, 10, 0).await; + let _ = produce_messages_to_partition(&producer, &topic_name, 11, 1).await; + let _ = produce_messages_to_partition(&producer, &topic_name, 12, 2).await; + + let group_name = rand_test_group(); + let consumer = utils::consumer::stream_consumer::create_stream_consumer_with_options( + &kafka_context.bootstrap_servers, + &group_name, + &[ + ("enable.auto.offset.store", "false"), + ("enable.partition.eof", "true"), + ], + ) + .await + .expect("could not create stream consumer"); consumer.subscribe(&[topic_name.as_str()]).unwrap(); let _consumer_future = consumer @@ -441,21 +685,42 @@ async fn test_consumer_store_offset_commit() { #[tokio::test(flavor = "multi_thread")] async fn test_consumer_commit_metadata() -> Result<(), Box> { - let _ = env_logger::try_init(); + init_test_logger(); + let kafka_context = KafkaContext::shared() + .await + .expect("could not create kafka context"); let topic_name = rand_test_topic("test_consumer_commit_metadata"); let group_name = rand_test_group(); - populate_topic(&topic_name, 10, &value_fn, &key_fn, None, None).await; + let admin_client = admin::create_admin_client(&kafka_context.bootstrap_servers) + .await + .expect("Could not create admin client"); + admin_client + .create_topics( + &new_topic_vec(&topic_name, Some(3)), + &AdminOptions::default(), + ) + .await + .expect("could not create topics"); + let producer = producer::future_producer::create_producer(&kafka_context.bootstrap_servers) + .await + .expect("Could not create Future producer"); + let _ = produce_messages_to_partition(&producer, &topic_name, 4, 0).await; + let _ = produce_messages_to_partition(&producer, &topic_name, 4, 1).await; + let _ = produce_messages_to_partition(&producer, &topic_name, 4, 2).await; let create_consumer = || async { - // Disable auto-commit so we can manually drive the commits. - let mut config = HashMap::new(); - config.insert("enable.auto.commit", "false"); - let consumer = create_stream_consumer(&group_name, Some(config)); - - // Subscribe to the topic and wait for at least one message, which - // ensures that the consumer group has been joined and such. 
- consumer.subscribe(&[topic_name.as_str()])?; + let consumer = utils::consumer::stream_consumer::create_stream_consumer_with_options( + &kafka_context.bootstrap_servers, + &group_name, + &[], + ) + .await + .context("failed to create stream consumer")?; + + consumer + .subscribe(&[topic_name.as_str()]) + .context("failed to subscribe to topic")?; let _ = consumer.stream().next().await; Ok::<_, Box>(consumer) @@ -496,17 +761,42 @@ async fn test_consumer_commit_metadata() -> Result<(), Box> { #[tokio::test(flavor = "multi_thread")] async fn test_consume_partition_order() { - let _r = env_logger::try_init(); + init_test_logger(); + let kafka_context = KafkaContext::shared() + .await + .expect("could not create kafka context"); let topic_name = rand_test_topic("test_consume_partition_order"); - populate_topic(&topic_name, 4, &value_fn, &key_fn, Some(0), None).await; - populate_topic(&topic_name, 4, &value_fn, &key_fn, Some(1), None).await; - populate_topic(&topic_name, 4, &value_fn, &key_fn, Some(2), None).await; + let admin_client = admin::create_admin_client(&kafka_context.bootstrap_servers) + .await + .expect("Could not create admin client"); + admin_client + .create_topics( + &new_topic_vec(&topic_name, Some(3)), + &AdminOptions::default(), + ) + .await + .expect("could not create topics"); + + let producer = producer::future_producer::create_producer(&kafka_context.bootstrap_servers) + .await + .expect("Could not create Future producer"); + let _ = produce_messages_to_partition(&producer, &topic_name, 4, 0).await; + let _ = produce_messages_to_partition(&producer, &topic_name, 4, 1).await; + let _ = produce_messages_to_partition(&producer, &topic_name, 4, 2).await; // Using partition queues should allow us to consume the partitions // in a round-robin fashion. { - let consumer = Arc::new(create_stream_consumer(&rand_test_group(), None)); + let consumer = Arc::new( + utils::consumer::stream_consumer::create_stream_consumer_with_options( + &kafka_context.bootstrap_servers, + &rand_test_group(), + &[], + ) + .await + .expect("could not create stream consumer"), + ); let mut tpl = TopicPartitionList::new(); tpl.add_partition_offset(&topic_name, 0, Offset::Beginning) .unwrap(); @@ -535,7 +825,15 @@ async fn test_consume_partition_order() { // When not all partitions have been split into separate queues, the // unsplit partitions should still be accessible via the main queue. { - let consumer = Arc::new(create_stream_consumer(&rand_test_group(), None)); + let consumer = Arc::new( + utils::consumer::stream_consumer::create_stream_consumer_with_options( + &kafka_context.bootstrap_servers, + &rand_test_group(), + &[], + ) + .await + .expect("could not create stream consumer"), + ); let mut tpl = TopicPartitionList::new(); tpl.add_partition_offset(&topic_name, 0, Offset::Beginning) .unwrap(); @@ -599,7 +897,15 @@ async fn test_consume_partition_order() { // should be continuously polled to serve callbacks, but it should not panic // or result in memory unsafety, etc. 
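A sketch of the setup the comment above describes, assuming rdkafka's StreamConsumer::split_partition_queue: once a partition is split off, the main stream must still be driven even though no messages are expected from it:

    let partition_queue = consumer
        .split_partition_queue(&topic_name, 0)
        .expect("could not split partition queue");
    tokio::spawn({
        let consumer = Arc::clone(&consumer);
        async move {
            // Drives callbacks and events; an actual message here would be a bug.
            let _ = consumer.recv().await;
        }
    });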
{ - let consumer = Arc::new(create_stream_consumer(&rand_test_group(), None)); + let consumer = Arc::new( + utils::consumer::stream_consumer::create_stream_consumer_with_options( + &kafka_context.bootstrap_servers, + &rand_test_group(), + &[], + ) + .await + .expect("could not create stream consumer"), + ); let mut tpl = TopicPartitionList::new(); tpl.add_partition_offset(&topic_name, 0, Offset::Beginning) .unwrap(); diff --git a/tests/test_admin.rs b/tests/test_admin.rs deleted file mode 100644 index 87f258a9e..000000000 --- a/tests/test_admin.rs +++ /dev/null @@ -1,630 +0,0 @@ -//! Test administrative commands using the admin API. - -use std::time::Duration; - -use backon::{BlockingRetryable, ExponentialBuilder}; - -use rdkafka::admin::{ - AdminClient, AdminOptions, AlterConfig, ConfigEntry, ConfigSource, GroupResult, NewPartitions, - NewTopic, OwnedResourceSpecifier, ResourceSpecifier, TopicReplication, -}; -use rdkafka::client::DefaultClientContext; -use rdkafka::consumer::{BaseConsumer, CommitMode, Consumer, DefaultConsumerContext}; -use rdkafka::error::{KafkaError, RDKafkaErrorCode}; -use rdkafka::metadata::Metadata; -use rdkafka::producer::{FutureProducer, FutureRecord, Producer}; -use rdkafka::{ClientConfig, Offset, TopicPartitionList}; - -use crate::utils::*; - -mod utils; - -fn create_config() -> ClientConfig { - let mut config = ClientConfig::new(); - config.set("bootstrap.servers", get_bootstrap_server().as_str()); - config -} - -fn create_admin_client() -> AdminClient { - configure_logging_for_tests(); - create_config() - .create() - .expect("admin client creation failed") -} - -async fn create_consumer_group(consumer_group_name: &str) { - let admin_client = create_admin_client(); - let topic_name = &rand_test_topic(consumer_group_name); - let consumer: BaseConsumer = create_config() - .set("group.id", consumer_group_name) - .create() - .expect("create consumer failed"); - - admin_client - .create_topics( - &[NewTopic { - name: topic_name, - num_partitions: 1, - replication: TopicReplication::Fixed(1), - config: vec![], - }], - &AdminOptions::default(), - ) - .await - .expect("topic creation failed"); - let topic_partition_list = { - let mut lst = TopicPartitionList::new(); - lst.add_partition(topic_name, 0); - lst - }; - consumer - .assign(&topic_partition_list) - .expect("assign topic partition list failed"); - consumer - .fetch_metadata(None, Duration::from_secs(3)) - .expect("unable to fetch metadata"); - (|| consumer.store_offset(topic_name, 0, -1)) - .retry(ExponentialBuilder::default().with_max_delay(Duration::from_secs(5))) - .call() - .expect("store offset failed"); - consumer - .commit_consumer_state(CommitMode::Sync) - .expect("commit the consumer state failed"); -} - -fn fetch_metadata(topic: &str) -> Metadata { - let consumer: BaseConsumer = - create_config().create().expect("consumer creation failed"); - let timeout = Some(Duration::from_secs(1)); - - (|| { - let metadata = consumer - .fetch_metadata(Some(topic), timeout) - .map_err(|e| e.to_string())?; - if metadata.topics().is_empty() { - Err("metadata fetch returned no topics".to_string())? - } - let topic = &metadata.topics()[0]; - if topic.partitions().is_empty() { - Err("metadata fetch returned a topic with no partitions".to_string())? 
- } - Ok::<_, String>(metadata) - }) - .retry(ExponentialBuilder::default().with_max_delay(Duration::from_secs(5))) - .call() - .unwrap() -} - -fn verify_delete(topic: &str) { - let consumer: BaseConsumer = - create_config().create().expect("consumer creation failed"); - let timeout = Some(Duration::from_secs(1)); - - (|| { - // Asking about the topic specifically will recreate it (under the - // default Kafka configuration, at least) so we have to ask for the list - // of all topics and search through it. - let metadata = consumer - .fetch_metadata(None, timeout) - .map_err(|e| e.to_string())?; - if metadata.topics().iter().any(|t| t.name() == topic) { - Err(format!("topic {} still exists", topic))? - } - Ok::<(), String>(()) - }) - .retry(ExponentialBuilder::default().with_max_delay(Duration::from_secs(5))) - .call() - .unwrap() -} - -#[tokio::test] -async fn test_topics() { - let admin_client = create_admin_client(); - let opts = AdminOptions::new().operation_timeout(Some(Duration::from_secs(30))); - - // Verify that topics are created as specified, and that they can later - // be deleted. - { - let name1 = rand_test_topic("test_topics"); - let name2 = rand_test_topic("test_topics"); - - // Test both the builder API and the literal construction. - let topic1 = - NewTopic::new(&name1, 1, TopicReplication::Fixed(1)).set("max.message.bytes", "1234"); - let topic2 = NewTopic { - name: &name2, - num_partitions: 3, - replication: TopicReplication::Variable(&[&[0], &[0], &[0]]), - config: Vec::new(), - }; - - let res = admin_client - .create_topics(&[topic1, topic2], &opts) - .await - .expect("topic creation failed"); - assert_eq!(res, &[Ok(name1.clone()), Ok(name2.clone())]); - - let metadata1 = fetch_metadata(&name1); - let metadata2 = fetch_metadata(&name2); - assert_eq!(1, metadata1.topics().len()); - assert_eq!(1, metadata2.topics().len()); - let metadata_topic1 = &metadata1.topics()[0]; - let metadata_topic2 = &metadata2.topics()[0]; - assert_eq!(&name1, metadata_topic1.name()); - assert_eq!(&name2, metadata_topic2.name()); - assert_eq!(1, metadata_topic1.partitions().len()); - assert_eq!(3, metadata_topic2.partitions().len()); - - let res = admin_client - .describe_configs( - &[ - ResourceSpecifier::Topic(&name1), - ResourceSpecifier::Topic(&name2), - ], - &opts, - ) - .await - .expect("describe configs failed"); - let config1 = &res[0].as_ref().expect("describe configs failed on topic 1"); - let config2 = &res[1].as_ref().expect("describe configs failed on topic 2"); - let mut expected_entry1 = ConfigEntry { - name: "max.message.bytes".into(), - value: Some("1234".into()), - source: ConfigSource::DynamicTopic, - is_read_only: false, - is_default: false, - is_sensitive: false, - }; - let default_max_msg_bytes = if get_broker_version() <= KafkaVersion(2, 3, 0, 0) { - "1000012" - } else { - "1048588" - }; - let expected_entry2 = ConfigEntry { - name: "max.message.bytes".into(), - value: Some(default_max_msg_bytes.into()), - source: ConfigSource::Default, - is_read_only: false, - is_default: true, - is_sensitive: false, - }; - if get_broker_version() < KafkaVersion(1, 1, 0, 0) { - expected_entry1.source = ConfigSource::Unknown; - } - assert_eq!(Some(&expected_entry1), config1.get("max.message.bytes")); - assert_eq!(Some(&expected_entry2), config2.get("max.message.bytes")); - let config_entries1 = config1.entry_map(); - let config_entries2 = config2.entry_map(); - assert_eq!(config1.entries.len(), config_entries1.len()); - assert_eq!(config2.entries.len(), config_entries2.len()); - 
assert_eq!( - Some(&&expected_entry1), - config_entries1.get("max.message.bytes") - ); - assert_eq!( - Some(&&expected_entry2), - config_entries2.get("max.message.bytes") - ); - - let partitions1 = NewPartitions::new(&name1, 5); - let res = admin_client - .create_partitions(&[partitions1], &opts) - .await - .expect("partition creation failed"); - assert_eq!(res, &[Ok(name1.clone())]); - - let mut tries = 0; - loop { - let metadata = fetch_metadata(&name1); - let topic = &metadata.topics()[0]; - let n = topic.partitions().len(); - if n == 5 { - break; - } else if tries >= 5 { - panic!("topic has {} partitions, but expected {}", n, 5); - } else { - tries += 1; - tokio::time::sleep(Duration::from_secs(1)).await; - } - } - - let res = admin_client - .delete_topics(&[&name1, &name2], &opts) - .await - .expect("topic deletion failed"); - assert_eq!(res, &[Ok(name1.clone()), Ok(name2.clone())]); - verify_delete(&name1); - verify_delete(&name2); - } - - // Verify that incorrect replication configurations are ignored when - // creating topics. - { - let topic = NewTopic::new("ignored", 1, TopicReplication::Variable(&[&[0], &[0]])); - let res = admin_client.create_topics(&[topic], &opts).await; - assert_eq!( - Err(KafkaError::AdminOpCreation( - "replication configuration for topic 'ignored' assigns 2 partition(s), \ - which does not match the specified number of partitions (1)" - .into() - )), - res, - ) - } - - // Verify that incorrect replication configurations are ignored when - // creating partitions. - { - let name = rand_test_topic("test_topics"); - let topic = NewTopic::new(&name, 1, TopicReplication::Fixed(1)); - - let res = admin_client - .create_topics(vec![&topic], &opts) - .await - .expect("topic creation failed"); - assert_eq!(res, &[Ok(name.clone())]); - let _ = fetch_metadata(&name); - - // This partition specification is obviously garbage, and so trips - // a client-side error. - let partitions = NewPartitions::new(&name, 2).assign(&[&[0], &[0], &[0]]); - let res = admin_client.create_partitions(&[partitions], &opts).await; - assert_eq!( - res, - Err(KafkaError::AdminOpCreation(format!( - "partition assignment for topic '{}' assigns 3 partition(s), \ - which is more than the requested total number of partitions (2)", - name - ))) - ); - - // Only the server knows that this partition specification is garbage. - let partitions = NewPartitions::new(&name, 2).assign(&[&[0], &[0]]); - let res = admin_client - .create_partitions(&[partitions], &opts) - .await - .expect("partition creation failed"); - assert_eq!( - res, - &[Err((name, RDKafkaErrorCode::InvalidReplicaAssignment))], - ); - } - - // Verify that deleting a non-existent topic fails. - { - let name = rand_test_topic("test_topics"); - let res = admin_client - .delete_topics(&[&name], &opts) - .await - .expect("delete topics failed"); - assert_eq!( - res, - &[Err((name, RDKafkaErrorCode::UnknownTopicOrPartition))] - ); - } - - // Verify that mixed-success operations properly report the successful and - // failing operators. 
- { - let name1 = rand_test_topic("test_topics"); - let name2 = rand_test_topic("test_topics"); - - let topic1 = NewTopic::new(&name1, 1, TopicReplication::Fixed(1)); - let topic2 = NewTopic::new(&name2, 1, TopicReplication::Fixed(1)); - - let res = admin_client - .create_topics(vec![&topic1], &opts) - .await - .expect("topic creation failed"); - assert_eq!(res, &[Ok(name1.clone())]); - let _ = fetch_metadata(&name1); - - let res = admin_client - .create_topics(vec![&topic1, &topic2], &opts) - .await - .expect("topic creation failed"); - assert_eq!( - res, - &[ - Err((name1.clone(), RDKafkaErrorCode::TopicAlreadyExists)), - Ok(name2.clone()) - ] - ); - let _ = fetch_metadata(&name2); - - let res = admin_client - .delete_topics(&[&name1], &opts) - .await - .expect("topic deletion failed"); - assert_eq!(res, &[Ok(name1.clone())]); - verify_delete(&name1); - - let res = admin_client - .delete_topics(&[&name2, &name1], &opts) - .await - .expect("topic deletion failed"); - assert_eq!( - res, - &[ - Ok(name2.clone()), - Err((name1.clone(), RDKafkaErrorCode::UnknownTopicOrPartition)) - ] - ); - } -} - -/// Test the admin client's delete records functionality. -#[tokio::test] -async fn test_delete_records() { - let producer = create_config().create::>().unwrap(); - let admin_client = create_admin_client(); - let timeout = Some(Duration::from_secs(1)); - let opts = AdminOptions::new().operation_timeout(timeout); - let topic = rand_test_topic("test_delete_records"); - let make_record = || FutureRecord::::to(&topic).payload("data"); - - // Create a topic with a single partition. - admin_client - .create_topics( - &[NewTopic::new(&topic, 1, TopicReplication::Fixed(1))], - &opts, - ) - .await - .expect("topic creation failed"); - - // Ensure that the topic begins with low and high water marks of 0. - let (lo, hi) = (|| producer.client().fetch_watermarks(&topic, 0, timeout)) - .retry(ExponentialBuilder::default().with_max_delay(Duration::from_secs(5))) - .call() - .unwrap(); - assert_eq!(lo, 0); - assert_eq!(hi, 0); - - // Produce five messages to the topic. - for _ in 0..5 { - producer.send(make_record(), timeout).await.unwrap(); - } - - // Ensure that the high water mark has advanced to 5. - let (lo, hi) = producer - .client() - .fetch_watermarks(&topic, 0, timeout) - .unwrap(); - assert_eq!(lo, 0); - assert_eq!(hi, 5); - - // Delete the record at offset 0. - let mut tpl = TopicPartitionList::new(); - tpl.add_partition_offset(&topic, 0, Offset::Offset(1)) - .unwrap(); - let res_tpl = admin_client.delete_records(&tpl, &opts).await.unwrap(); - assert_eq!(res_tpl.count(), 1); - assert_eq!(res_tpl.elements()[0].topic(), topic); - assert_eq!(res_tpl.elements()[0].partition(), 0); - assert_eq!(res_tpl.elements()[0].offset(), Offset::Offset(1)); - assert_eq!(res_tpl.elements()[0].error(), Ok(())); - - // Ensure that the low water mark has advanced to 1. - let (lo, hi) = producer - .client() - .fetch_watermarks(&topic, 0, timeout) - .unwrap(); - assert_eq!(lo, 1); - assert_eq!(hi, 5); - - // Delete the record at offset 1 and also include an invalid partition in - // the request. The invalid partition should not cause the request to fail, - // but we should be able to see the per-partition error in the returned - // topic partition list. 
- let mut tpl = TopicPartitionList::new(); - tpl.add_partition_offset(&topic, 0, Offset::Offset(2)) - .unwrap(); - tpl.add_partition_offset(&topic, 1, Offset::Offset(1)) - .unwrap(); - let res_tpl = admin_client.delete_records(&tpl, &opts).await.unwrap(); - assert_eq!(res_tpl.count(), 2); - assert_eq!(res_tpl.elements()[0].topic(), topic); - assert_eq!(res_tpl.elements()[0].partition(), 0); - assert_eq!(res_tpl.elements()[0].offset(), Offset::Offset(2)); - assert_eq!(res_tpl.elements()[0].error(), Ok(())); - assert_eq!(res_tpl.elements()[1].topic(), topic); - assert_eq!(res_tpl.elements()[1].partition(), 1); - assert_eq!( - res_tpl.elements()[1].error(), - Err(KafkaError::OffsetFetch(RDKafkaErrorCode::UnknownPartition)) - ); - - // Ensure that the low water mark has advanced to 2. - let (lo, hi) = producer - .client() - .fetch_watermarks(&topic, 0, timeout) - .unwrap(); - assert_eq!(lo, 2); - assert_eq!(hi, 5); - - // Delete all records up to offset 5. - let mut tpl = TopicPartitionList::new(); - tpl.add_partition_offset(&topic, 0, Offset::End).unwrap(); - let res_tpl = admin_client.delete_records(&tpl, &opts).await.unwrap(); - assert_eq!(res_tpl.count(), 1); - assert_eq!(res_tpl.elements()[0].topic(), topic); - assert_eq!(res_tpl.elements()[0].partition(), 0); - assert_eq!(res_tpl.elements()[0].offset(), Offset::Offset(5)); - assert_eq!(res_tpl.elements()[0].error(), Ok(())); - - // Ensure that the low water mark has advanced to 5. - let (lo, hi) = producer - .client() - .fetch_watermarks(&topic, 0, timeout) - .unwrap(); - assert_eq!(lo, 5); - assert_eq!(hi, 5); -} - -#[tokio::test] -async fn test_configs() { - let admin_client = create_admin_client(); - let opts = AdminOptions::new(); - let broker = ResourceSpecifier::Broker(0); - - let res = admin_client - .describe_configs(&[broker], &opts) - .await - .expect("describe configs failed"); - let config = &res[0].as_ref().expect("describe configs failed"); - let orig_val = config - .get("log.flush.interval.messages") - .expect("original config entry missing") - .value - .as_ref() - .expect("original value missing"); - - let config = AlterConfig::new(broker).set("log.flush.interval.messages", "1234"); - let res = admin_client - .alter_configs(&[config], &opts) - .await - .expect("alter configs failed"); - assert_eq!(res, &[Ok(OwnedResourceSpecifier::Broker(0))]); - - let mut tries = 0; - loop { - let res = admin_client - .describe_configs(&[broker], &opts) - .await - .expect("describe configs failed"); - let config = &res[0].as_ref().expect("describe configs failed"); - let entry = config.get("log.flush.interval.messages"); - let expected_entry = if get_broker_version() < KafkaVersion(1, 1, 0, 0) { - // Pre-1.1, the AlterConfig operation will silently fail, and the - // config will remain unchanged, which I guess is worth testing. 
- ConfigEntry { - name: "log.flush.interval.messages".into(), - value: Some(orig_val.clone()), - source: ConfigSource::Default, - is_read_only: true, - is_default: true, - is_sensitive: false, - } - } else { - ConfigEntry { - name: "log.flush.interval.messages".into(), - value: Some("1234".into()), - source: ConfigSource::DynamicBroker, - is_read_only: false, - is_default: false, - is_sensitive: false, - } - }; - if entry == Some(&expected_entry) { - break; - } else if tries >= 5 { - panic!("{:?} != {:?}", entry, Some(&expected_entry)); - } else { - tries += 1; - tokio::time::sleep(Duration::from_secs(1)).await; - } - } - - let config = AlterConfig::new(broker).set("log.flush.interval.ms", orig_val); - let res = admin_client - .alter_configs(&[config], &opts) - .await - .expect("alter configs failed"); - assert_eq!(res, &[Ok(OwnedResourceSpecifier::Broker(0))]); -} - -#[tokio::test] -async fn test_groups() { - let admin_client = create_admin_client(); - - // Verify that a valid group can be deleted. - { - let group_name = rand_test_group(); - create_consumer_group(&group_name).await; - let res = admin_client - .delete_groups(&[&group_name], &AdminOptions::default()) - .await; - assert_eq!(res, Ok(vec![Ok(group_name.to_string())])); - } - - // Verify that attempting to delete an unknown group returns a "group not - // found" error. - { - let unknown_group_name = rand_test_group(); - let res = admin_client - .delete_groups(&[&unknown_group_name], &AdminOptions::default()) - .await; - let expected: GroupResult = Err((unknown_group_name, RDKafkaErrorCode::GroupIdNotFound)); - assert_eq!(res, Ok(vec![expected])); - } - - // Verify that deleting a valid and invalid group results in a mixed result - // set. - { - let group_name = rand_test_group(); - let unknown_group_name = rand_test_group(); - create_consumer_group(&group_name).await; - let res = admin_client - .delete_groups( - &[&group_name, &unknown_group_name], - &AdminOptions::default(), - ) - .await; - assert_eq!( - res, - Ok(vec![ - Ok(group_name.to_string()), - Err(( - unknown_group_name.to_string(), - RDKafkaErrorCode::GroupIdNotFound - )) - ]) - ); - } -} - -// Tests whether each admin operation properly reports an error if the entire -// request fails. The original implementations failed to check this, resulting -// in confusing situations where a failed admin request would return Ok([]). -#[tokio::test] -async fn test_event_errors() { - // Configure an admin client to target a Kafka server that doesn't exist, - // then set an impossible timeout. This will ensure that every request fails - // with an OperationTimedOut error, assuming, of course, that the request - // passes client-side validation. 
- let admin_client = ClientConfig::new() - .set("bootstrap.servers", "noexist") - .create::>() - .expect("admin client creation failed"); - let opts = AdminOptions::new().request_timeout(Some(Duration::from_nanos(1))); - - let res = admin_client.create_topics(&[], &opts).await; - assert_eq!( - res, - Err(KafkaError::AdminOp(RDKafkaErrorCode::OperationTimedOut)) - ); - - let res = admin_client.create_partitions(&[], &opts).await; - assert_eq!( - res, - Err(KafkaError::AdminOp(RDKafkaErrorCode::OperationTimedOut)) - ); - - let res = admin_client.delete_topics(&[], &opts).await; - assert_eq!( - res, - Err(KafkaError::AdminOp(RDKafkaErrorCode::OperationTimedOut)) - ); - - let res = admin_client.describe_configs(&[], &opts).await; - assert_eq!( - res.err(), - Some(KafkaError::AdminOp(RDKafkaErrorCode::OperationTimedOut)) - ); - - let res = admin_client.alter_configs(&[], &opts).await; - assert_eq!( - res, - Err(KafkaError::AdminOp(RDKafkaErrorCode::OperationTimedOut)) - ); -} diff --git a/tests/test_topic_partition_list.rs b/tests/topic_partition_lists.rs similarity index 92% rename from tests/test_topic_partition_list.rs rename to tests/topic_partition_lists.rs index 2f0ac3511..69279b89f 100644 --- a/tests/test_topic_partition_list.rs +++ b/tests/topic_partition_lists.rs @@ -1,4 +1,4 @@ -use rdkafka::{Offset, TopicPartitionList}; +use rdkafka::topic_partition_list::{Offset, TopicPartitionList}; /// Test topic partition list API and wrappers. diff --git a/tests/test_transactions.rs b/tests/transactions.rs similarity index 54% rename from tests/test_transactions.rs rename to tests/transactions.rs index 014c4e000..9968c3251 100644 --- a/tests/test_transactions.rs +++ b/tests/transactions.rs @@ -1,12 +1,11 @@ //! Test transactions using the base consumer and producer. 
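For orientation, the transactional flow these tests exercise, condensed (Producer-trait methods; error handling elided):

    producer.init_transactions(Timeout::Never)?;
    producer.begin_transaction()?;
    // ... produce some records with producer.send(...) ...
    producer.send_offsets_to_transaction(
        &consumer.position()?,
        &consumer.group_metadata().expect("no group metadata"),
        Timeout::Never,
    )?;
    producer.commit_transaction(Timeout::Never)?; // or abort_transaction(...)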
-use std::collections::HashMap; use std::error::Error; use std::time::Duration; use log::info; -use maplit::hashmap; +use rdkafka::admin::AdminOptions; use rdkafka::config::ClientConfig; use rdkafka::config::RDKafkaLogLevel; use rdkafka::consumer::{BaseConsumer, CommitMode, Consumer}; @@ -15,22 +14,45 @@ use rdkafka::producer::{BaseProducer, BaseRecord, Producer}; use rdkafka::topic_partition_list::{Offset, TopicPartitionList}; use rdkafka::util::Timeout; -use utils::*; +use crate::utils::admin; +use crate::utils::containers::KafkaContext; +use crate::utils::logging::init_test_logger; +use crate::utils::producer; +use crate::utils::rand::*; +use crate::utils::*; mod utils; -fn create_consumer( - config_overrides: Option<HashMap<&str, &str>>, -) -> Result<BaseConsumer, KafkaError> { - configure_logging_for_tests(); - consumer_config(&rand_test_group(), config_overrides).create() +async fn create_consumer( + kafka_context: &KafkaContext, + config_overrides: Option<&[(&str, &str)]>, +) -> Result<BaseConsumer<ConsumerTestContext>, KafkaError> { + init_test_logger(); + let group_id = rand_test_group(); + let mut config = ClientConfig::new(); + config + .set("group.id", &group_id) + .set("enable.partition.eof", "false") + .set("client.id", "rdkafka_integration_test_client") + .set("bootstrap.servers", &kafka_context.bootstrap_servers) + .set("session.timeout.ms", "6000") + .set("debug", "all") + .set("auto.offset.reset", "earliest"); + + if let Some(overrides) = config_overrides { + for (key, value) in overrides { + config.set(*key, *value); + } + } + + config.create_with_context(ConsumerTestContext { _n: 64 }) } -fn create_producer() -> Result<BaseProducer, KafkaError> { - configure_logging_for_tests(); +fn create_producer(kafka_context: &KafkaContext) -> Result<BaseProducer, KafkaError> { + init_test_logger(); let mut config = ClientConfig::new(); config - .set("bootstrap.servers", get_bootstrap_server()) + .set("bootstrap.servers", &kafka_context.bootstrap_servers) .set("message.timeout.ms", "5000") .set("enable.idempotence", "true") .set("transactional.id", rand_test_transactional_id()) @@ -44,14 +66,22 @@ enum IsolationLevel { ReadCommitted, } -fn count_records(topic: &str, iso: IsolationLevel) -> Result<usize, KafkaError> { - let consumer = create_consumer(Some(hashmap!
{ - "isolation.level" => match iso { - IsolationLevel::ReadUncommitted => "read_uncommitted", - IsolationLevel::ReadCommitted => "read_committed", - }, - "enable.partition.eof" => "true" - }))?; +async fn count_records( + kafka_context: &KafkaContext, + topic: &str, + iso: IsolationLevel, +) -> Result<usize, KafkaError> { + let isolation = match iso { + IsolationLevel::ReadUncommitted => "read_uncommitted", + IsolationLevel::ReadCommitted => "read_committed", + }; + + let consumer = create_consumer( + kafka_context, + Some(&[("isolation.level", isolation), ("enable.partition.eof", "true")]), + ) + .await?; + + let mut tpl = TopicPartitionList::new(); tpl.add_partition(topic, 0); consumer.assign(&tpl)?; @@ -68,13 +98,39 @@ fn count_records(topic: &str, iso: IsolationLevel) -> Result<usize, KafkaError> #[tokio::test] async fn test_transaction_abort() -> Result<(), Box<dyn Error>> { + init_test_logger(); + + let kafka_context = KafkaContext::shared() + .await + .expect("could not create kafka context"); let consume_topic = rand_test_topic("test_transaction_abort"); let produce_topic = rand_test_topic("test_transaction_abort"); - populate_topic(&consume_topic, 30, &value_fn, &key_fn, Some(0), None).await; + let admin_client = admin::create_admin_client(&kafka_context.bootstrap_servers) + .await + .expect("Could not create admin client"); + admin_client + .create_topics( + &admin::new_topic_vec(&consume_topic, Some(1)), + &AdminOptions::default(), + ) + .await + .expect("could not create consume topic"); + admin_client + .create_topics( + &admin::new_topic_vec(&produce_topic, Some(1)), + &AdminOptions::default(), + ) + .await + .expect("could not create produce topic"); + + let future_producer = producer::future_producer::create_producer(&kafka_context.bootstrap_servers) + .await + .expect("Could not create Future producer"); + let _ = produce_messages_to_partition(&future_producer, &consume_topic, 30, 0).await; // Create consumer and subscribe to `consume_topic`. - let consumer = create_consumer(None)?; + let consumer = create_consumer(&kafka_context, None).await?; consumer.subscribe(&[&consume_topic])?; consumer.poll(Timeout::Never).unwrap()?; @@ -84,7 +140,7 @@ async fn test_transaction_abort() -> Result<(), Box<dyn Error>> { consumer.commit(&commit_tpl, CommitMode::Sync).unwrap(); // Create a producer and start a transaction. - let producer = create_producer()?; + let producer = create_producer(&kafka_context)?; producer.init_transactions(Timeout::Never)?; producer.begin_transaction()?; @@ -116,11 +172,11 @@ async fn test_transaction_abort() -> Result<(), Box<dyn Error>> { // Check that no records were produced in read committed mode, but that // the records are visible in read uncommitted mode.
assert_eq!( - count_records(&produce_topic, IsolationLevel::ReadCommitted)?, + count_records(&kafka_context, &produce_topic, IsolationLevel::ReadCommitted).await?, 0, ); assert_eq!( - count_records(&produce_topic, IsolationLevel::ReadUncommitted)?, + count_records(&kafka_context, &produce_topic, IsolationLevel::ReadUncommitted).await?, 10, ); @@ -139,13 +195,39 @@ async fn test_transaction_abort() -> Result<(), Box<dyn Error>> { #[tokio::test] async fn test_transaction_commit() -> Result<(), Box<dyn Error>> { + init_test_logger(); + + let kafka_context = KafkaContext::shared() + .await + .expect("could not create kafka context"); let consume_topic = rand_test_topic("test_transaction_commit"); let produce_topic = rand_test_topic("test_transaction_commit"); - populate_topic(&consume_topic, 30, &value_fn, &key_fn, Some(0), None).await; + let admin_client = admin::create_admin_client(&kafka_context.bootstrap_servers) + .await + .expect("Could not create admin client"); + admin_client + .create_topics( + &admin::new_topic_vec(&consume_topic, Some(1)), + &AdminOptions::default(), + ) + .await + .expect("could not create consume topic"); + admin_client + .create_topics( + &admin::new_topic_vec(&produce_topic, Some(1)), + &AdminOptions::default(), + ) + .await + .expect("could not create produce topic"); + + let future_producer = producer::future_producer::create_producer(&kafka_context.bootstrap_servers) + .await + .expect("Could not create Future producer"); + let _ = produce_messages_to_partition(&future_producer, &consume_topic, 30, 0).await; // Create consumer and subscribe to `consume_topic`. - let consumer = create_consumer(None)?; + let consumer = create_consumer(&kafka_context, None).await?; consumer.subscribe(&[&consume_topic])?; consumer.poll(Timeout::Never).unwrap()?; @@ -155,7 +237,7 @@ async fn test_transaction_commit() -> Result<(), Box<dyn Error>> { consumer.commit(&commit_tpl, CommitMode::Sync).unwrap(); // Create a producer and start a transaction. - let producer = create_producer()?; + let producer = create_producer(&kafka_context)?; producer.init_transactions(Timeout::Never)?; producer.begin_transaction()?; @@ -182,11 +264,11 @@ async fn test_transaction_commit() -> Result<(), Box<dyn Error>> { // Check that 10 records were produced.
assert_eq!( - count_records(&produce_topic, IsolationLevel::ReadUncommitted)?, + count_records(&kafka_context, &produce_topic, IsolationLevel::ReadUncommitted).await?, 10, ); assert_eq!( - count_records(&produce_topic, IsolationLevel::ReadCommitted)?, + count_records(&kafka_context, &produce_topic, IsolationLevel::ReadCommitted).await?, 10, ); diff --git a/tests/utils/admin.rs b/tests/utils/admin.rs new file mode 100644 index 000000000..2d9e0bb0e --- /dev/null +++ b/tests/utils/admin.rs @@ -0,0 +1,38 @@ +use anyhow::{bail, Context}; +use rdkafka::admin::{AdminClient, AdminOptions, NewTopic, TopicReplication}; +use rdkafka::client::DefaultClientContext; +use rdkafka::config::FromClientConfig; +use rdkafka::ClientConfig; + +pub async fn create_admin_client( + bootstrap_servers: &str, +) -> anyhow::Result> { + let mut admin_client_config = ClientConfig::default(); + admin_client_config.set("bootstrap.servers", bootstrap_servers); + AdminClient::from_config(&admin_client_config).context("error creating admin client") +} + +pub async fn create_topic( + admin_client: &AdminClient, + topic_name: &'_ str, +) -> anyhow::Result { + let topic_results = admin_client + .create_topics(&new_topic_vec(&topic_name, None), &AdminOptions::default()) + .await + .context("error creating topics")?; + for topic_result in topic_results { + if topic_result.is_err() { + bail!("failed to create topic: {:?}", topic_result.unwrap_err()); + }; + } + Ok(topic_name.to_string()) +} + +pub fn new_topic_vec(topic_name: &'_ str, num_partitions: Option) -> Vec> { + let new_topic = NewTopic::new( + &topic_name, + num_partitions.unwrap_or(1), + TopicReplication::Fixed(1), + ); + vec![new_topic] +} diff --git a/tests/utils/consumer/mod.rs b/tests/utils/consumer/mod.rs new file mode 100644 index 000000000..e2afbb0a3 --- /dev/null +++ b/tests/utils/consumer/mod.rs @@ -0,0 +1,161 @@ +pub mod stream_consumer; + +use crate::utils::rand::rand_test_group; +use crate::utils::ConsumerTestContext; +use anyhow::{bail, Context}; +use backon::{BlockingRetryable, ExponentialBuilder}; +use rdkafka::config::FromClientConfig; +use rdkafka::consumer::{BaseConsumer, CommitMode, Consumer}; +use rdkafka::message::BorrowedMessage; +use rdkafka::metadata::Metadata; +use rdkafka::{ClientConfig, TopicPartitionList}; +use std::time::Duration; + +pub async fn create_subscribed_base_consumer( + bootstrap_servers: &str, + consumer_group_option: Option<&str>, + test_topic: &str, +) -> anyhow::Result { + let unsubscribed_base_consumer = + create_unsubscribed_base_consumer(bootstrap_servers, consumer_group_option).await?; + unsubscribed_base_consumer + .subscribe(&[test_topic]) + .context("Failed to subscribe to topic")?; + Ok(unsubscribed_base_consumer) +} + +pub async fn create_unsubscribed_base_consumer( + bootstrap_servers: &str, + consumer_group_option: Option<&str>, +) -> anyhow::Result { + let consumer_group_name = match consumer_group_option { + Some(consumer_group_name) => consumer_group_name, + None => &rand_test_group(), + }; + let mut consumer_client_config = ClientConfig::default(); + consumer_client_config.set("group.id", consumer_group_name); + consumer_client_config.set("client.id", "rdkafka_integration_test_client"); + consumer_client_config.set("bootstrap.servers", bootstrap_servers); + consumer_client_config.set("enable.partition.eof", "false"); + consumer_client_config.set("session.timeout.ms", "6000"); + consumer_client_config.set("enable.auto.commit", "false"); + consumer_client_config.set("debug", "all"); + 
consumer_client_config.set("auto.offset.reset", "earliest"); + + BaseConsumer::from_config(&consumer_client_config).context("Failed to create consumer") +} + +pub fn create_base_consumer( + bootstrap_servers: &str, + consumer_group: &str, + config_overrides: Option<&[(&str, &str)]>, +) -> anyhow::Result> { + let mut consumer_client_config = ClientConfig::default(); + consumer_client_config.set("group.id", consumer_group); + consumer_client_config.set("client.id", "rdkafka_integration_test_client"); + consumer_client_config.set("bootstrap.servers", bootstrap_servers); + consumer_client_config.set("enable.partition.eof", "false"); + consumer_client_config.set("session.timeout.ms", "6000"); + consumer_client_config.set("enable.auto.commit", "false"); + consumer_client_config.set("debug", "all"); + consumer_client_config.set("auto.offset.reset", "earliest"); + + if let Some(overrides) = config_overrides { + for (key, value) in overrides { + consumer_client_config.set(*key, *value); + } + } + + consumer_client_config + .create_with_context::>( + ConsumerTestContext { _n: 64 }, + ) + .context("Failed to create consumer with context") +} + +pub async fn poll_x_times_for_messages( + consumer: &BaseConsumer, + times_to_poll: i32, +) -> anyhow::Result>> { + let mut borrowed_messages: Vec = Vec::new(); + + for _ in 0..times_to_poll { + let Some(next_message_result) = consumer.poll(Duration::from_secs(2)) else { + continue; + }; + + let Ok(borrowed_next_message) = next_message_result else { + panic!( + "could not get next message from based_consumer: {}", + next_message_result.unwrap_err() + ); + }; + borrowed_messages.push(borrowed_next_message); + tokio::time::sleep(Duration::from_millis(100)).await; + } + + Ok(borrowed_messages) +} + +pub fn fetch_consumer_metadata(consumer: &BaseConsumer, topic: &str) -> anyhow::Result { + let timeout = Some(Duration::from_secs(1)); + + (|| { + let metadata = consumer + .fetch_metadata(Some(topic), timeout) + .context("Failed to fetch metadata")?; + if metadata.topics().is_empty() { + bail!("metadata fetch returned no topics".to_string()) + } + let topic = &metadata.topics()[0]; + if topic.partitions().is_empty() { + bail!("metadata fetch returned a topic with no partitions".to_string()) + } + Ok(metadata) + }) + .retry(ExponentialBuilder::default().with_max_delay(Duration::from_secs(5))) + .call() +} + +pub fn verify_topic_deleted(consumer: &BaseConsumer, topic: &str) -> anyhow::Result<()> { + let timeout = Some(Duration::from_secs(1)); + + (|| { + // Asking about the topic specifically will recreate it (under the + // default Kafka configuration, at least) so we have to ask for the list + // of all topics and search through it. 
+pub async fn create_consumer_group_on_topic(
+    consumer_client: &BaseConsumer,
+    topic_name: &str,
+) -> anyhow::Result<()> {
+    let topic_partition_list = {
+        let mut lst = TopicPartitionList::new();
+        lst.add_partition(topic_name, 0);
+        lst
+    };
+    consumer_client
+        .assign(&topic_partition_list)
+        .context("assign topic partition list failed")?;
+    consumer_client
+        .fetch_metadata(None, Duration::from_secs(3))
+        .context("unable to fetch metadata")?;
+    (|| consumer_client.store_offset(topic_name, 0, -1))
+        .retry(ExponentialBuilder::default().with_max_delay(Duration::from_secs(5)))
+        .call()
+        .context("store offset failed")?;
+    consumer_client
+        .commit_consumer_state(CommitMode::Sync)
+        .context("committing the consumer state failed")
+}
diff --git a/tests/utils/consumer/stream_consumer.rs b/tests/utils/consumer/stream_consumer.rs
new file mode 100644
index 000000000..9e94d3a96
--- /dev/null
+++ b/tests/utils/consumer/stream_consumer.rs
@@ -0,0 +1,40 @@
+use anyhow::Context;
+use rdkafka::config::FromClientConfig;
+use rdkafka::consumer::StreamConsumer;
+use rdkafka::ClientConfig;
+
+pub async fn create_stream_consumer(
+    bootstrap_server: &str,
+    consumer_group_option: Option<&str>,
+) -> anyhow::Result<StreamConsumer> {
+    let mut client_config = ClientConfig::default();
+    client_config.set("bootstrap.servers", bootstrap_server);
+    client_config.set("auto.offset.reset", "earliest");
+    if let Some(group) = consumer_group_option {
+        client_config.set("group.id", group);
+    }
+
+    StreamConsumer::from_config(&client_config).context("failed to create stream consumer")
+}
+
+pub async fn create_stream_consumer_with_options(
+    bootstrap_server: &str,
+    consumer_group: &str,
+    options: &[(&str, &str)],
+) -> anyhow::Result<StreamConsumer> {
+    let mut client_config = ClientConfig::default();
+    client_config.set("bootstrap.servers", bootstrap_server);
+    client_config.set("group.id", consumer_group);
+    client_config.set("auto.offset.reset", "earliest");
+    client_config.set("enable.auto.commit", "false");
+
+    for (key, value) in options {
+        client_config.set(*key, *value);
+    }
+
+    StreamConsumer::from_config(&client_config).context("failed to create stream consumer")
+}
diff --git a/tests/utils/containers.rs b/tests/utils/containers.rs
new file mode 100644
index 000000000..560d7ce7e
--- /dev/null
+++ b/tests/utils/containers.rs
@@ -0,0 +1,90 @@
+use anyhow::Context;
+use std::fmt::Debug;
+use std::sync::Arc;
+use testcontainers_modules::kafka::apache::Kafka;
+use testcontainers_modules::testcontainers::core::ContainerPort;
+use testcontainers_modules::testcontainers::runners::AsyncRunner;
+use testcontainers_modules::testcontainers::{ContainerAsync, Image};
+use tokio::sync::OnceCell;
+
+pub struct KafkaContext {
+    kafka_node: ContainerAsync<Kafka>,
+    pub bootstrap_servers: String,
+    pub version: String,
+}
+
+impl Debug for KafkaContext {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        f.debug_struct("KafkaContext").finish()
+    }
+}
+
+impl KafkaContext {
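+    /// Returns the process-wide Kafka container, starting it on first use.
+    /// Every test in the binary shares this single broker, which is why test
+    /// topics and groups carry randomized names (see `utils::rand`). A
+    /// hypothetical usage sketch:
+    ///
+    /// ```ignore
+    /// let kafka_context = KafkaContext::shared().await?;
+    /// let admin_client =
+    ///     admin::create_admin_client(&kafka_context.bootstrap_servers).await?;
+    /// ```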
.context("Failed to initialize Kafka shared instance") + .map(Arc::clone) + } + + pub async fn std_out(&self) -> anyhow::Result { + let std_out_byte_vec = self + .kafka_node + .stdout_to_vec() + .await + .context("Failed to get stdout")?; + Ok(String::from_utf8(std_out_byte_vec)?) + } + + pub async fn std_err(&self) -> anyhow::Result { + let std_err_byte_vec = self + .kafka_node + .stderr_to_vec() + .await + .context("Failed to get stderr")?; + Ok(String::from_utf8(std_err_byte_vec)?) + } +} + +async fn init() -> anyhow::Result> { + let kafka_container = Kafka::default(); + let kafka_version = kafka_container.tag().to_string(); + + let kafka_node = kafka_container + .start() + .await + .context("Failed to start Kafka")?; + let kafka_host = kafka_node + .get_host() + .await + .context("Failed to get Kafka host")?; + let kafka_port = kafka_node + .get_host_port_ipv4(ContainerPort::Tcp(9092)) + .await?; + + Ok::, anyhow::Error>(Arc::new(KafkaContext { + kafka_node, + bootstrap_servers: format!("{}:{}", kafka_host, kafka_port), + version: kafka_version, + })) +} + +#[tokio::test] +pub async fn test_kafka_context_works() { + let kafka_context_result = KafkaContext::shared().await; + let Ok(kafka_context) = kafka_context_result else { + panic!( + "Failed to get Kafka context: {}", + kafka_context_result.unwrap_err() + ); + }; + + assert_ne!( + kafka_context.bootstrap_servers.len(), + 0, + "Bootstrap servers empty" + ); +} diff --git a/tests/utils/logging.rs b/tests/utils/logging.rs new file mode 100644 index 000000000..401db65a5 --- /dev/null +++ b/tests/utils/logging.rs @@ -0,0 +1,9 @@ +use std::sync::Once; + +static INIT: Once = Once::new(); + +pub fn init_test_logger() { + INIT.call_once(|| { + env_logger::try_init().expect("Failed to initialize env_logger"); + }); +} diff --git a/tests/utils.rs b/tests/utils/mod.rs similarity index 63% rename from tests/utils.rs rename to tests/utils/mod.rs index 10ab34cf5..e68caa5cf 100644 --- a/tests/utils.rs +++ b/tests/utils/mod.rs @@ -1,13 +1,20 @@ #![allow(dead_code)] +pub mod admin; +pub mod consumer; +pub mod containers; +pub mod logging; +pub mod producer; +pub mod rand; +pub mod topics; + use std::collections::HashMap; -use std::env::{self, VarError}; -use std::sync::Once; +use std::env::{self}; use std::time::Duration; -use rand::distr::{Alphanumeric, SampleString}; use regex::Regex; +use crate::utils::containers::KafkaContext; use rdkafka::admin::{AdminClient, AdminOptions, NewTopic, TopicReplication}; use rdkafka::client::ClientContext; use rdkafka::config::ClientConfig; @@ -18,48 +25,25 @@ use rdkafka::producer::{FutureProducer, FutureRecord}; use rdkafka::statistics::Statistics; use rdkafka::TopicPartitionList; -pub fn rand_test_topic(test_name: &str) -> String { - let id = Alphanumeric.sample_string(&mut rand::rng(), 10); - format!("__{}_{}", test_name, id) -} - -pub fn rand_test_group() -> String { - let id = Alphanumeric.sample_string(&mut rand::rng(), 10); - format!("__test_{}", id) -} - -pub fn rand_test_transactional_id() -> String { - let id = Alphanumeric.sample_string(&mut rand::rng(), 10); - format!("__test_{}", id) -} +pub const BROKER_ID: i32 = 1; pub fn get_bootstrap_server() -> String { env::var("KAFKA_HOST").unwrap_or_else(|_| "localhost:9092".to_owned()) } -pub fn get_broker_version() -> KafkaVersion { - // librdkafka doesn't expose this directly, sadly. 
- match env::var("KAFKA_VERSION") { - Ok(v) => { - let regex = Regex::new(r"^(\d+)(?:\.(\d+))?(?:\.(\d+))?(?:\.(\d+))?$").unwrap(); - match regex.captures(&v) { - Some(captures) => { - let extract = |i| { - captures - .get(i) - .map(|m| m.as_str().parse().unwrap()) - .unwrap_or(0) - }; - KafkaVersion(extract(1), extract(2), extract(3), extract(4)) - } - None => panic!("KAFKA_VERSION env var was not in expected [n[.n[.n[.n]]]] format"), - } - } - Err(VarError::NotUnicode(_)) => { - panic!("KAFKA_VERSION env var contained non-unicode characters") +pub fn get_broker_version(kafka_context: &KafkaContext) -> KafkaVersion { + let regex = Regex::new(r"^(\d+)(?:\.(\d+))?(?:\.(\d+))?(?:\.(\d+))?$").unwrap(); + match regex.captures(&kafka_context.version) { + Some(captures) => { + let extract = |i| { + captures + .get(i) + .map(|m| m.as_str().parse().unwrap()) + .unwrap_or(0) + }; + KafkaVersion(extract(1), extract(2), extract(3), extract(4)) } - // If the environment variable is unset, assume we're running the latest version. - Err(VarError::NotPresent) => KafkaVersion(u32::MAX, u32::MAX, u32::MAX, u32::MAX), + None => panic!("KAFKA_VERSION env var was not in expected [n[.n[.n[.n]]]] format"), } } @@ -145,6 +129,75 @@ where message_map } +pub async fn produce_messages_with_timestamp( + producer: &FutureProducer, + topic_name: &str, + count: usize, + partition: i32, + timestamp: i64, +) -> HashMap<(i32, i64), i32> { + produce_messages( + producer, + topic_name, + count, + Some(partition), + Some(timestamp), + ) + .await +} + +pub async fn produce_messages_to_partition( + producer: &FutureProducer, + topic_name: &str, + count: usize, + partition: i32, +) -> HashMap<(i32, i64), i32> { + produce_messages(producer, topic_name, count, Some(partition), None).await +} + +pub async fn produce_messages( + producer: &FutureProducer, + topic_name: &str, + count: usize, + partition: Option, + timestamp: Option, +) -> HashMap<(i32, i64), i32> { + let mut inflight = Vec::with_capacity(count); + + for idx in 0..count { + let id = idx as i32; + let payload = value_fn(id); + let key = key_fn(id); + let mut record = FutureRecord::to(topic_name).payload(&payload).key(&key); + if let Some(partition) = partition { + record = record.partition(partition); + } + if let Some(timestamp) = timestamp { + record = record.timestamp(timestamp); + } + let delivery_future = producer + .send_result(record) + .expect("failed to enqueue message"); + inflight.push((id, payload, key, delivery_future)); + } + + let mut message_map = HashMap::new(); + + for (id, _payload, _key, delivery_future) in inflight { + match delivery_future + .await + .expect("producer unexpectedly dropped") + { + Ok(delivery) => { + message_map.insert((delivery.partition, delivery.offset), id); + } + Err((error, _message)) => panic!("Delivery failed: {}", error), + }; + } + + message_map +} + pub fn value_fn(id: i32) -> String { format!("Message {}", id) } @@ -194,32 +247,3 @@ pub fn consumer_config( config } - -static INIT: Once = Once::new(); - -pub fn configure_logging_for_tests() { - INIT.call_once(|| { - env_logger::try_init().expect("Failed to initialize env_logger"); - }); -} - -#[cfg(test)] -mod tests { - use super::*; - - #[tokio::test] - async fn test_populate_topic() { - let topic_name = rand_test_topic("test_populate_topic"); - let message_map = populate_topic(&topic_name, 100, &value_fn, &key_fn, Some(0), None).await; - - let total_messages = message_map - .iter() - .filter(|&(&(partition, _), _)| partition == 0) - .count(); - 
+pub async fn produce_messages(
+    producer: &FutureProducer,
+    topic_name: &str,
+    count: usize,
+    partition: Option<i32>,
+    timestamp: Option<i64>,
+) -> HashMap<(i32, i64), i32> {
+    let mut inflight = Vec::with_capacity(count);
+
+    for idx in 0..count {
+        let id = idx as i32;
+        let payload = value_fn(id);
+        let key = key_fn(id);
+        let mut record = FutureRecord::to(topic_name).payload(&payload).key(&key);
+        if let Some(partition) = partition {
+            record = record.partition(partition);
+        }
+        if let Some(timestamp) = timestamp {
+            record = record.timestamp(timestamp);
+        }
+        let delivery_future = producer
+            .send_result(record)
+            .expect("failed to enqueue message");
+        inflight.push((id, payload, key, delivery_future));
+    }
+
+    let mut message_map = HashMap::new();
+
+    for (id, _payload, _key, delivery_future) in inflight {
+        match delivery_future
+            .await
+            .expect("producer unexpectedly dropped")
+        {
+            Ok(delivery) => {
+                message_map.insert((delivery.partition, delivery.offset), id);
+            }
+            Err((error, _message)) => panic!("Delivery failed: {}", error),
+        };
+    }
+
+    message_map
+}
+
 pub fn value_fn(id: i32) -> String {
     format!("Message {}", id)
 }
@@ -194,32 +247,3 @@ pub fn consumer_config(
     config
 }
-
-static INIT: Once = Once::new();
-
-pub fn configure_logging_for_tests() {
-    INIT.call_once(|| {
-        env_logger::try_init().expect("Failed to initialize env_logger");
-    });
-}
-
-#[cfg(test)]
-mod tests {
-    use super::*;
-
-    #[tokio::test]
-    async fn test_populate_topic() {
-        let topic_name = rand_test_topic("test_populate_topic");
-        let message_map = populate_topic(&topic_name, 100, &value_fn, &key_fn, Some(0), None).await;
-
-        let total_messages = message_map
-            .iter()
-            .filter(|&(&(partition, _), _)| partition == 0)
-            .count();
-
-        assert_eq!(total_messages, 100);
-
-        let mut ids = message_map.values().copied().collect::<Vec<_>>();
-        ids.sort();
-        assert_eq!(ids, (0..100).collect::<Vec<_>>());
-    }
-}
diff --git a/tests/utils/producer/base_producer.rs b/tests/utils/producer/base_producer.rs
new file mode 100644
index 000000000..0ad7c4fac
--- /dev/null
+++ b/tests/utils/producer/base_producer.rs
@@ -0,0 +1,98 @@
+use anyhow::{bail, Context};
+use rdkafka::config::{FromClientConfig, FromClientConfigAndContext};
+use rdkafka::producer::{
+    BaseProducer, BaseRecord, Partitioner, Producer, ProducerContext, ThreadedProducer,
+};
+use rdkafka::util::Timeout;
+use rdkafka::ClientConfig;
+use std::time::Duration;
+
+pub async fn create_producer(bootstrap_servers: &str) -> anyhow::Result<BaseProducer> {
+    let mut producer_client_config = ClientConfig::default();
+    producer_client_config.set("bootstrap.servers", bootstrap_servers);
+    create_base_producer(&producer_client_config)
+}
+
+pub fn create_base_producer(config: &ClientConfig) -> anyhow::Result<BaseProducer> {
+    BaseProducer::from_config(config).context("error creating base producer")
+}
+
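+/// Builds a `BaseProducer` with a caller-supplied `ProducerContext` (e.g. one
+/// that records delivery callbacks), applying `config_overrides` on top of a
+/// 5 second message timeout.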
+pub fn create_base_producer_with_context<C, Part>(
+    bootstrap_servers: &str,
+    context: C,
+    config_overrides: &[(&str, &str)],
+) -> anyhow::Result<BaseProducer<C, Part>>
+where
+    C: ProducerContext<Part>,
+    Part: Partitioner,
+{
+    let mut producer_client_config = ClientConfig::default();
+    producer_client_config.set("bootstrap.servers", bootstrap_servers);
+    producer_client_config.set("message.timeout.ms", "5000");
+
+    for (key, value) in config_overrides {
+        producer_client_config.set(*key, *value);
+    }
+
+    BaseProducer::from_config_and_context(&producer_client_config, context)
+        .context("error creating base producer with context")
+}
+
+pub fn create_threaded_producer_with_context<C, Part>(
+    bootstrap_servers: &str,
+    context: C,
+    config_overrides: &[(&str, &str)],
+) -> anyhow::Result<ThreadedProducer<C, Part>>
+where
+    C: ProducerContext<Part>,
+    Part: Partitioner + Send + Sync + 'static,
+{
+    let mut producer_client_config = ClientConfig::default();
+    producer_client_config.set("bootstrap.servers", bootstrap_servers);
+    producer_client_config.set("message.timeout.ms", "5000");
+
+    for (key, value) in config_overrides {
+        producer_client_config.set(*key, *value);
+    }
+
+    ThreadedProducer::from_config_and_context(&producer_client_config, context)
+        .context("error creating threaded producer with context")
+}
+
+pub async fn send_record(
+    producer: &BaseProducer,
+    record: BaseRecord<'_, [u8; 4], str>,
+) -> anyhow::Result<()> {
+    if let Err((kafka_error, _record)) = producer.send(record) {
+        bail!("could not produce record: {:?}", kafka_error);
+    }
+    if poll_and_flush(producer).is_err() {
+        bail!("could not poll and flush base producer")
+    }
+
+    Ok(())
+}
+
+pub fn poll_and_flush(base_producer: &BaseProducer) -> anyhow::Result<()> {
+    for _ in 0..5 {
+        base_producer.poll(Duration::from_millis(100));
+    }
+    base_producer
+        .flush(Timeout::After(Duration::from_secs(10)))
+        .context("flush failed")
+}
diff --git a/tests/utils/producer/future_producer.rs b/tests/utils/producer/future_producer.rs
new file mode 100644
index 000000000..8d4891ad7
--- /dev/null
+++ b/tests/utils/producer/future_producer.rs
@@ -0,0 +1,12 @@
+use anyhow::Context;
+use rdkafka::config::FromClientConfig;
+use rdkafka::producer::FutureProducer;
+use rdkafka::ClientConfig;
+
+pub async fn create_producer(bootstrap_servers: &str) -> anyhow::Result<FutureProducer> {
+    let mut producer_client_config = ClientConfig::default();
+    producer_client_config.set("bootstrap.servers", bootstrap_servers);
+    FutureProducer::from_config(&producer_client_config)
+        .context("couldn't create producer client")
+}
diff --git a/tests/utils/producer/mod.rs b/tests/utils/producer/mod.rs
new file mode 100644
index 000000000..f09b39c97
--- /dev/null
+++ b/tests/utils/producer/mod.rs
@@ -0,0 +1,2 @@
+pub mod base_producer;
+pub mod future_producer;
diff --git a/tests/utils/rand.rs b/tests/utils/rand.rs
new file mode 100644
index 000000000..fabe6e668
--- /dev/null
+++ b/tests/utils/rand.rs
@@ -0,0 +1,16 @@
+use rand::distr::{Alphanumeric, SampleString};
+
+pub fn rand_test_topic(test_name: &str) -> String {
+    let id = Alphanumeric.sample_string(&mut rand::rng(), 10);
+    format!("{}_{}", test_name, id)
+}
+
+pub fn rand_test_group() -> String {
+    let id = Alphanumeric.sample_string(&mut rand::rng(), 10);
+    format!("__test_{}", id)
+}
+
+pub fn rand_test_transactional_id() -> String {
+    let id = Alphanumeric.sample_string(&mut rand::rng(), 10);
+    format!("__test_{}", id)
+}
diff --git a/tests/utils/topics.rs b/tests/utils/topics.rs
new file mode 100644
index 000000000..d6664219a
--- /dev/null
+++ b/tests/utils/topics.rs
@@ -0,0 +1,44 @@
+use rdkafka::producer::{FutureProducer, FutureRecord};
+use std::collections::HashMap;
+use std::time::Duration;
+
+pub type PartitionOffset = (i32, i64);
+pub type MessageId = usize;
+
+pub async fn populate_topic_using_future_producer(
+    producer: &FutureProducer,
+    topic_name: &str,
+    num_messages: usize,
+    partition: Option<i32>,
+) -> anyhow::Result<HashMap<PartitionOffset, MessageId>> {
+    let message_send_futures = (0..num_messages)
+        .map(|id| {
+            let future = async move {
+                producer
+                    .send(
+                        FutureRecord {
+                            topic: topic_name,
+                            payload: Some(&id.to_string()),
+                            key: Some(&id.to_string()),
+                            partition,
+                            timestamp: None,
+                            headers: None,
+                        },
+                        Duration::from_secs(1),
+                    )
+                    .await
+            };
+            (id, future)
+        })
+        .collect::<Vec<_>>();
+
+    let mut message_map = HashMap::<PartitionOffset, MessageId>::new();
+    for (id, future) in message_send_futures {
+        match future.await {
+            Ok(delivered) => message_map.insert((delivered.partition, delivered.offset), id),
+            Err((kafka_error, _message)) => panic!("Delivery failed: {}", kafka_error),
+        };
+    }
+
+    Ok(message_map)
+}