diff --git a/Cargo.lock b/Cargo.lock index e5390de..c134fa6 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1524,14 +1524,22 @@ checksum = "d7a1e2f27636f116493b8b860f5546edb47c8d8f8ea73e1d2a20be88e28d1fea" name = "datum-connect" version = "0.1.0" dependencies = [ + "async-trait", "clap", "dotenv", + "hickory-proto", + "hickory-server", + "humantime", + "iroh-base", "lib", "n0-error", + "serde", + "serde_yml", "tokio", "tokio-util", "tracing", "tracing-subscriber", + "z32", ] [[package]] @@ -3499,6 +3507,7 @@ dependencies = [ "rand 0.9.2", "ring", "rustls", + "serde", "thiserror 2.0.17", "tinyvec", "tokio", @@ -3530,6 +3539,29 @@ dependencies = [ "tracing", ] +[[package]] +name = "hickory-server" +version = "0.25.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d53e5fe811b941c74ee46b8818228bfd2bc2688ba276a0eaeb0f2c95ea3b2585" +dependencies = [ + "async-trait", + "bytes", + "cfg-if", + "data-encoding", + "enum-as-inner", + "futures-util", + "hickory-proto", + "ipnet", + "prefix-trie", + "serde", + "thiserror 2.0.17", + "time", + "tokio", + "tokio-util", + "tracing", +] + [[package]] name = "hkdf" version = "0.12.4" @@ -3605,6 +3637,12 @@ version = "1.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "df3b46402a9d5adb4c86a0cf463f42e19994e3ee891101b1841f30a545cb49a9" +[[package]] +name = "humantime" +version = "2.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "135b12329e5e3ce057a9f972339ea52bc954fe1e9358ef27f95e89716fbc5424" + [[package]] name = "hybrid-array" version = "0.4.5" @@ -3990,6 +4028,9 @@ name = "ipnet" version = "2.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "469fb0b9cefa57e3ef31275ee7cacb78f2fdca44e4765491884a2b119d4eb130" +dependencies = [ + "serde", +] [[package]] name = "iri-string" @@ -4582,6 +4623,7 @@ dependencies = [ "iroh-n0des", "iroh-proxy-utils", "iroh-quinn", + "iroh-relay", "iroh-tickets", "log", "n0-error", @@ -5496,7 +5538,7 @@ version = "0.7.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ff32365de1b6743cb203b710788263c44a03de03802daf96092f2da4fe6ba4d7" dependencies = [ - "proc-macro-crate 2.0.2", + "proc-macro-crate 3.4.0", "proc-macro2", "quote", "syn 2.0.114", @@ -5508,7 +5550,7 @@ version = "5.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "51e219e79014df21a225b1860a479e2dcd7cbd9130f4defd4bd0e191ea31d67d" dependencies = [ - "base64 0.21.7", + "base64 0.22.1", "chrono", "getrandom 0.2.17", "http", @@ -6291,6 +6333,16 @@ version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "925383efa346730478fb4838dbe9137d2a47675ad789c546d150a6e1dd4ab31c" +[[package]] +name = "prefix-trie" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "85cf4c7c25f1dd66c76b451e9041a8cfce26e4ca754934fa7aed8d5a59a01d20" +dependencies = [ + "ipnet", + "num-traits", +] + [[package]] name = "primeorder" version = "0.13.6" diff --git a/Cargo.toml b/Cargo.toml index 542b68a..d36f152 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -24,6 +24,7 @@ iroh-base = { version = "0.95" } iroh-tickets = "0.2" iroh-metrics = "0.38" iroh-n0des = { version = "0.8", features = ["tickets"] } +iroh-relay = { version = "0.95" } log = "0.4" open = "5" openidconnect = "4.0.1" diff --git a/README.md b/README.md index 6056553..7cd9e2c 100644 --- a/README.md +++ b/README.md @@ -17,6 +17,137 @@ cd cli cargo run -- --help ``` +### Local 
forward-proxy demo (no GUI)
+This exercises the CONNECT-based gateway flow that Envoy will use in staging/prod.
+
+#### 1) Start a local DNS dev server (out-of-band)
+Use a non-`.local` origin (e.g. `datumconnect.test`):
+
+```
+cargo run -p datum-connect -- dns-dev serve \
+  --origin datumconnect.test \
+  --bind 127.0.0.1:53535 \
+  --data ./dns-dev.yml
+```
+
+#### 2) Start the listen node (connector side)
+This prints the endpoint id and the bound iroh UDP sockets that you must publish:
+
+```
+cargo run -p datum-connect -- serve
+```
+
+Copy the printed `dns-dev upsert` example, but run it via `cargo run -p datum-connect -- ...`
+and make sure the origin matches `datumconnect.test`. Quote IPv6 addresses like `"[::]:1234"`.
+
+#### 3) Verify TXT resolution
+The `serve` command prints the z-base-32 ID and the full DNS name. Query it with:
+
+```
+dig +norecurse @127.0.0.1 -p 53535 TXT _iroh.<z32-endpoint-id>.datumconnect.test
+```
+
+#### 4) Start the gateway in forward mode
+
+```
+cargo run -p datum-connect -- gateway \
+  --port 8080 \
+  --mode forward \
+  --discovery dns \
+  --dns-origin datumconnect.test \
+  --dns-resolver 127.0.0.1:53535
+```
+
+Discovery modes:
+- `default`: iroh defaults (n0 preset).
+- `dns`: only the provided DNS origin/resolver.
+- `hybrid`: default + custom DNS.
+
+#### 5) Send a CONNECT request
+If your target TCP service is on `127.0.0.1:5173`:
+
+```
+curl --proxytunnel -x 127.0.0.1:8080 \
+  --proxy-header "x-iroh-endpoint-id: REPLACE_WITH_ENDPOINT_ID" \
+  "http://127.0.0.1:5173"
+```
+
+### GUI demo (browser tunnel)
+This mirrors the forward-proxy flow above, but uses the GUI to create the proxy entry.
+
+If you want a one-shot experience, run:
+
+```
+./scripts/try-ui-demo.sh
+```
+
+It starts dns-dev, an HTTPS origin, the gateway, and the GUI, and waits for you to
+create a TCP proxy in the UI before visiting `https://localhost:5173` in the browser.
+
+#### 1) Start `dns-dev`
+```
+cargo run -p datum-connect -- dns-dev serve \
+  --origin datumconnect.test \
+  --bind 127.0.0.1:53535 \
+  --data ./dns-dev.yml
+```
+
+#### 2) Start a local HTTPS origin (so the browser uses CONNECT)
+```
+openssl req -x509 -nodes -newkey rsa:2048 -days 1 \
+  -keyout /tmp/iroh-dev.key -out /tmp/iroh-dev.crt \
+  -subj "/CN=localhost"
+openssl s_server -accept 5173 -cert /tmp/iroh-dev.crt -key /tmp/iroh-dev.key -www
+```
+
+#### 3) Run the GUI (shares the repo with the CLI)
+```
+export DATUM_CONNECT_REPO=$(pwd)/.datum-connect-dev
+cd ui
+dx serve --platform desktop
+```
+
+#### 4) Create a proxy in the GUI
+Add a TCP proxy for `127.0.0.1:5173`.
+
+#### 5) Start the listen node (uses the same repo)
+```
+cd ..
+export DATUM_CONNECT_REPO=$(pwd)/.datum-connect-dev
+cargo run -p datum-connect -- serve
+```
+Copy the printed `dns-dev upsert` example, but change the origin to `datumconnect.test`
+and run it via `cargo run -p datum-connect -- ...` (quote IPv6 addresses).
+
+#### 6) Start the gateway in forward mode
+```
+export DATUM_CONNECT_REPO=$(pwd)/.datum-connect-dev
+cargo run -p datum-connect -- gateway \
+  --port 8080 \
+  --mode forward \
+  --discovery dns \
+  --dns-origin datumconnect.test \
+  --dns-resolver 127.0.0.1:53535
+```
+
+#### 7) Start a local entrypoint that always tunnels through the gateway
+This avoids any browser proxy configuration.
It listens on `127.0.0.1:8888` and +uses CONNECT under the hood to reach the target: +``` +cargo run -p datum-connect -- tunnel-dev \ + --gateway 127.0.0.1:8080 \ + --node-id REPLACE_WITH_ENDPOINT_ID \ + --target-host 127.0.0.1 \ + --target-port 5173 +``` +Now visit: +``` +https://localhost:8888 +``` +You should see the `openssl s_server` status page (cipher list + handshake info). +That output is expected and means the CONNECT request tunneled through the gateway +to the local origin. + ### Running the UI: to run the UI, make sure you have rust, cargo, and dioxus installed: diff --git a/cli/Cargo.toml b/cli/Cargo.toml index 49aa454..5bcb7f8 100644 --- a/cli/Cargo.toml +++ b/cli/Cargo.toml @@ -12,3 +12,11 @@ tracing-subscriber.workspace = true clap = { version = "4.5.50", features = ["derive", "env"] } tracing.workspace = true tokio-util.workspace = true +serde.workspace = true +serde_yml.workspace = true +async-trait = "0.1.89" +humantime = "2.1.0" +hickory-server = "0.25.2" +hickory-proto = "0.25.2" +iroh-base.workspace = true +z32 = "1.0.3" \ No newline at end of file diff --git a/cli/src/dns_dev.rs b/cli/src/dns_dev.rs new file mode 100644 index 0000000..1b022db --- /dev/null +++ b/cli/src/dns_dev.rs @@ -0,0 +1,239 @@ +use std::{ + fs, + net::SocketAddr, + path::PathBuf, + str::FromStr, + time::{Duration, SystemTime}, +}; + +use hickory_proto::rr::{ + DNSClass, Name, RData, Record, + rdata::{NS, SOA, TXT}, +}; +use hickory_server::{ + ServerFuture, + authority::{Catalog, ZoneType}, + store::in_memory::InMemoryAuthority, +}; +use iroh_base::EndpointId; +use n0_error::StdResultExt; +use serde::{Deserialize, Serialize}; +use tokio::{net::UdpSocket, sync::RwLock, time}; +use tracing::{info, warn}; + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct DnsDevConfig { + pub origin: String, + #[serde(default)] + pub records: Vec, +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct DnsDevRecord { + pub endpoint_id: String, + #[serde(default)] + pub relay: Option, + #[serde(default)] + pub addrs: Vec, +} + +pub async fn serve( + bind_addr: SocketAddr, + config_path: PathBuf, + origin: String, + reload_interval: Duration, +) -> n0_error::Result<()> { + let mut last_modified = fs::metadata(&config_path) + .and_then(|m| m.modified()) + .unwrap_or(SystemTime::UNIX_EPOCH); + let catalog = ArcCatalog::new(build_catalog(&config_path, &origin)?); + let handler = SharedCatalog::new(catalog.clone()); + + let mut server = ServerFuture::new(handler); + let socket = UdpSocket::bind(bind_addr).await?; + server.register_socket(socket); + + let reload_task = tokio::spawn(async move { + let mut interval = time::interval(reload_interval); + loop { + interval.tick().await; + if let Ok(modified) = fs::metadata(&config_path).and_then(|m| m.modified()) { + if modified > last_modified { + match build_catalog(&config_path, &origin) { + Ok(new_catalog) => { + catalog.replace(new_catalog).await; + last_modified = modified; + } + Err(err) => { + warn!("failed to reload dns dev config: {err:#}"); + } + } + } + } + } + }); + + info!(?bind_addr, "dns-dev server started"); + server.block_until_done().await.anyerr()?; + reload_task.abort(); + Ok(()) +} + +pub fn upsert( + config_path: PathBuf, + origin: String, + endpoint_id: String, + relay: Option, + addrs: Vec, +) -> n0_error::Result<()> { + let mut config = if config_path.exists() { + serde_yml::from_str::(&fs::read_to_string(&config_path)?) + .anyerr()? 
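+        // If parsing the existing file fails, the `?` above bails out of the
+        // upsert rather than overwriting the file with defaults.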
+    } else {
+        DnsDevConfig {
+            origin: origin.clone(),
+            records: Vec::new(),
+        }
+    };
+
+    if config.origin.is_empty() || config.origin != origin {
+        config.origin = origin;
+    }
+
+    if let Some(record) = config
+        .records
+        .iter_mut()
+        .find(|r| r.endpoint_id == endpoint_id)
+    {
+        if relay.is_some() {
+            record.relay = relay;
+        }
+        if !addrs.is_empty() {
+            record.addrs = addrs;
+        }
+    } else {
+        config.records.push(DnsDevRecord {
+            endpoint_id,
+            relay,
+            addrs,
+        });
+    }
+
+    let data = serde_yml::to_string(&config).anyerr()?;
+    fs::write(config_path, data)?;
+    Ok(())
+}
+
+fn build_catalog(config_path: &PathBuf, fallback_origin: &str) -> n0_error::Result<Catalog> {
+    let config = if config_path.exists() {
+        serde_yml::from_str::<DnsDevConfig>(&fs::read_to_string(config_path)?)
+            .anyerr()?
+    } else {
+        DnsDevConfig {
+            origin: fallback_origin.to_string(),
+            records: Vec::new(),
+        }
+    };
+
+    let origin = if config.origin.is_empty() {
+        fallback_origin.to_string()
+    } else {
+        config.origin.clone()
+    };
+    let origin = normalize_origin(&origin);
+
+    let zone_name = Name::from_str(&format!("{origin}.")).anyerr()?;
+    let mut authority = InMemoryAuthority::empty(zone_name.clone(), ZoneType::Primary, false);
+
+    let serial = 1;
+    let ttl = 30;
+    let mname = Name::from_str(&format!("ns.{origin}.")).anyerr()?;
+    let rname = Name::from_str(&format!("admin.{origin}.")).anyerr()?;
+    let soa = SOA::new(mname.clone(), rname, serial, 60, 60, 60, 30);
+    let mut soa_record = Record::from_rdata(zone_name.clone(), ttl, RData::SOA(soa));
+    soa_record.set_dns_class(DNSClass::IN);
+    authority.upsert_mut(soa_record, serial);
+
+    let mut ns_record = Record::from_rdata(zone_name.clone(), ttl, RData::NS(NS(mname)));
+    ns_record.set_dns_class(DNSClass::IN);
+    authority.upsert_mut(ns_record, serial);
+
+    for record in config.records {
+        let endpoint_id = EndpointId::from_str(&record.endpoint_id)?;
+        let z32_id = z32::encode(endpoint_id.as_bytes());
+        let name = Name::from_str(&format!("_iroh.{z32_id}.{origin}.")).anyerr()?;
+
+        let mut txt_entries = Vec::new();
+        if let Some(relay) = record.relay {
+            txt_entries.push(format!("relay={relay}"));
+        }
+        if !record.addrs.is_empty() {
+            txt_entries.push(format!("addr={}", record.addrs.join(" ")));
+        }
+        if txt_entries.is_empty() {
+            continue;
+        }
+        let txt = TXT::new(txt_entries);
+        let mut txt_record = Record::from_rdata(name, ttl, RData::TXT(txt));
+        txt_record.set_dns_class(DNSClass::IN);
+        authority.upsert_mut(txt_record, serial);
+    }
+
+    let mut catalog = Catalog::new();
+    catalog.upsert(
+        zone_name.into(),
+        vec![std::sync::Arc::new(authority)],
+    );
+    Ok(catalog)
+}
+
+fn normalize_origin(origin: &str) -> String {
+    origin.trim_end_matches('.').to_string()
+}
+
+#[derive(Clone)]
+struct ArcCatalog {
+    inner: std::sync::Arc<RwLock<Catalog>>,
+}
+
+impl ArcCatalog {
+    fn new(catalog: Catalog) -> Self {
+        Self {
+            inner: std::sync::Arc::new(RwLock::new(catalog)),
+        }
+    }
+
+    async fn replace(&self, catalog: Catalog) {
+        let mut inner = self.inner.write().await;
+        *inner = catalog;
+        info!("dns-dev catalog reloaded");
+    }
+}
+
+#[derive(Clone)]
+struct SharedCatalog {
+    inner: ArcCatalog,
+}
+
+impl SharedCatalog {
+    fn new(inner: ArcCatalog) -> Self {
+        Self { inner }
+    }
+}
+
+#[async_trait::async_trait]
+impl hickory_server::server::RequestHandler for SharedCatalog {
+    async fn handle_request<R: hickory_server::server::ResponseHandler>(
+        &self,
+        request: &hickory_server::server::Request,
+        response_handle: R,
+    ) -> hickory_server::server::ResponseInfo {
+        let catalog = self.inner.inner.read().await;
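+        // Delegate to the current Catalog. `replace` takes the write half of the
+        // RwLock, so a config reload waits for in-flight requests holding this guard.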
hickory_server::server::RequestHandler::handle_request( + &*catalog, + request, + response_handle, + ) + .await + } +} diff --git a/cli/src/main.rs b/cli/src/main.rs index bcab4e0..b87f861 100644 --- a/cli/src/main.rs +++ b/cli/src/main.rs @@ -1,7 +1,11 @@ //! Command line arguments. -use clap::{Parser, Subcommand}; +use clap::{Parser, Subcommand, ValueEnum}; +mod dns_dev; +mod tunnel_dev; + use lib::{ - Advertisment, AdvertismentTicket, ConnectNode, ListenNode, ProxyState, Repo, TcpProxyData, + Advertisment, AdvertismentTicket, ConnectNode, DiscoveryMode, GatewayMode, ListenNode, + ProxyState, Repo, TcpProxyData, datum_cloud::{ApiEnv, DatumCloudClient}, }; use std::{ @@ -30,6 +34,13 @@ enum Commands { /// Start a gateway server that forwards HTTP requests through a Datum Connect tunnel. Gateway(ServeArgs), + /// Run a local DNS server for development TXT records. + #[clap(subcommand)] + DnsDev(DnsDevArgs), + + /// Local entrypoint that tunnels traffic through the gateway using CONNECT. + TunnelDev(TunnelDevArgs), + /// List configured proxies. List, @@ -47,6 +58,71 @@ enum AddCommands { }, } +#[derive(Subcommand, Debug)] +enum DnsDevArgs { + /// Serve a local DNS responder for _iroh TXT records. + Serve(DnsDevServeArgs), + /// Upsert a TXT record into the dev config file. + Upsert(DnsDevUpsertArgs), +} + +#[derive(Parser, Debug)] +pub struct DnsDevServeArgs { + /// UDP bind address for the DNS server. + #[clap(long, default_value = "127.0.0.1:53535")] + pub bind: SocketAddr, + /// Origin domain for _iroh... + #[clap(long)] + pub origin: String, + /// Path to the YAML config file containing records. + #[clap(long, default_value = "dns-dev.yml")] + pub data: PathBuf, + /// Reload interval for reading updated config file. + #[clap(long, default_value = "1s")] + pub reload_interval: humantime::Duration, +} + +#[derive(Parser, Debug)] +pub struct DnsDevUpsertArgs { + /// Origin domain for _iroh... + #[clap(long)] + pub origin: String, + /// Path to the YAML config file containing records. + #[clap(long, default_value = "dns-dev.yml")] + pub data: PathBuf, + /// EndpointId for the TXT record (iroh public key). + #[clap(long)] + pub endpoint_id: String, + /// Optional relay URL. + #[clap(long)] + pub relay: Option, + /// Direct socket addresses for the endpoint (repeatable). + #[clap(long)] + pub addr: Vec, +} + +#[derive(Parser, Debug)] +pub struct TunnelDevArgs { + /// TCP bind address for local browser traffic. + #[clap(long, default_value = "127.0.0.1:8888")] + pub listen: SocketAddr, + /// Gateway address that accepts CONNECT requests. + #[clap(long, default_value = "127.0.0.1:8080")] + pub gateway: SocketAddr, + /// iroh endpoint id for the connector. + #[clap(long)] + pub node_id: String, + /// Target host to dial through the tunnel. + #[clap(long, default_value = "127.0.0.1")] + pub target_host: String, + /// Target port to dial through the tunnel. + #[clap(long)] + pub target_port: u16, + /// Target protocol (must be tcp for now). + #[clap(long, default_value = "tcp")] + pub target_protocol: String, +} + #[derive(Parser, Debug)] pub struct ConnectArgs { /// The addresses to listen on for incoming tcp connections. @@ -72,6 +148,31 @@ pub struct ServeArgs { pub bind_addr: IpAddr, #[clap(long, default_value = "8080")] pub port: u16, + /// Gateway mode for reverse proxy (default) or forward proxy (CONNECT). + #[clap(long, value_enum)] + pub mode: Option, + /// Discovery mode for connection details. + #[clap(long, value_enum)] + pub discovery: Option, + /// DNS origin for _iroh.. lookups. 
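+    /// For local development this should match the origin passed to `dns-dev serve`
+    /// (e.g. `datumconnect.test`); `dns` and `hybrid` discovery need an origin.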
+ #[clap(long)] + pub dns_origin: Option, + /// DNS resolver address for discovery (e.g. 127.0.0.1:53535). + #[clap(long)] + pub dns_resolver: Option, +} + +#[derive(Debug, Clone, Copy, ValueEnum)] +pub enum GatewayModeArg { + Reverse, + Forward, +} + +#[derive(Debug, Clone, Copy, ValueEnum)] +pub enum DiscoveryModeArg { + Default, + Dns, + Hybrid, } #[tokio::main] @@ -128,7 +229,30 @@ async fn main() -> n0_error::Result<()> { } Commands::Serve => { let node = ListenNode::new(repo).await?; - println!("listening as {}", node.endpoint_id()); + let endpoint_id = node.endpoint_id(); + println!("listening as {}", endpoint_id); + let bound_addrs = node.endpoint().bound_sockets(); + if !bound_addrs.is_empty() { + println!("iroh bound sockets:"); + for addr in &bound_addrs { + println!(" {addr}"); + } + let z32_id = z32::encode(endpoint_id.as_bytes()); + println!(); + println!("dns-dev lookup:"); + println!(" _iroh.{z32_id}.datumconnect.test"); + println!(); + println!("dns-dev example:"); + println!( + " datum-connect dns-dev upsert --origin datumconnect.test --data ./dns-dev.yml --endpoint-id {} --addr {}", + endpoint_id, + bound_addrs + .iter() + .map(|addr| addr.to_string()) + .collect::>() + .join(" --addr ") + ); + } for p in node.proxies() { if !p.enabled { continue; @@ -172,13 +296,55 @@ async fn main() -> n0_error::Result<()> { Commands::Gateway(args) => { let bind_addr: SocketAddr = (args.bind_addr, args.port).into(); let secret_key = repo.gateway_key().await?; - let config = Default::default(); + let mut config = repo.gateway_config().await?; + if let Some(mode) = args.mode { + config.gateway_mode = match mode { + GatewayModeArg::Reverse => GatewayMode::Reverse, + GatewayModeArg::Forward => GatewayMode::Forward, + }; + } + if let Some(discovery) = args.discovery { + config.common.discovery_mode = match discovery { + DiscoveryModeArg::Default => DiscoveryMode::Default, + DiscoveryModeArg::Dns => DiscoveryMode::Dns, + DiscoveryModeArg::Hybrid => DiscoveryMode::Hybrid, + }; + } + if let Some(origin) = args.dns_origin { + config.common.dns_origin = Some(origin); + } + if let Some(resolver) = args.dns_resolver { + config.common.dns_resolver = Some(resolver); + } println!("serving on port {bind_addr}"); tokio::select! 
{ res = lib::gateway::bind_and_serve(secret_key, config, bind_addr) => res?, _ = tokio::signal::ctrl_c() => {} } } + Commands::DnsDev(args) => match args { + DnsDevArgs::Serve(args) => { + dns_dev::serve( + args.bind, + args.data, + args.origin, + args.reload_interval.into(), + ) + .await?; + } + DnsDevArgs::Upsert(args) => { + dns_dev::upsert( + args.data, + args.origin, + args.endpoint_id, + args.relay, + args.addr, + )?; + } + }, + Commands::TunnelDev(args) => { + tunnel_dev::serve(args).await?; + } } Ok(()) } diff --git a/cli/src/tunnel_dev.rs b/cli/src/tunnel_dev.rs new file mode 100644 index 0000000..2b0c18b --- /dev/null +++ b/cli/src/tunnel_dev.rs @@ -0,0 +1,105 @@ +use std::net::SocketAddr; + +use n0_error::Result; +use tokio::{ + io::{AsyncReadExt, AsyncWriteExt, copy_bidirectional}, + net::{TcpListener, TcpStream}, +}; +use tracing::{info, warn}; + +use crate::TunnelDevArgs; + +const MAX_CONNECT_RESPONSE: usize = 16 * 1024; + +pub async fn serve(args: TunnelDevArgs) -> Result<()> { + if args.target_protocol != "tcp" { + n0_error::bail_any!("target-protocol must be tcp for now"); + } + + let listener = TcpListener::bind(args.listen).await?; + info!( + listen = %args.listen, + gateway = %args.gateway, + target = %format!("{}:{}", args.target_host, args.target_port), + "tunnel-dev listening" + ); + + loop { + let (mut inbound, peer) = listener.accept().await?; + let gateway = args.gateway; + let node_id = args.node_id.clone(); + let target_host = args.target_host.clone(); + let target_port = args.target_port; + let target_protocol = args.target_protocol.clone(); + + tokio::spawn(async move { + if let Err(err) = handle_connection( + &mut inbound, + gateway, + &node_id, + &target_host, + target_port, + &target_protocol, + ) + .await + { + warn!(%peer, "tunnel-dev connection failed: {err:#}"); + } + }); + } +} + +async fn handle_connection( + inbound: &mut TcpStream, + gateway: SocketAddr, + node_id: &str, + target_host: &str, + target_port: u16, + _target_protocol: &str, +) -> Result<()> { + let mut outbound = TcpStream::connect(gateway).await?; + let authority = format!("{target_host}:{target_port}"); + let connect_req = format!( + "CONNECT {authority} HTTP/1.1\r\n\ +Host: {authority}\r\n\ +x-iroh-endpoint-id: {node_id}\r\n\ +\r\n" + ); + outbound.write_all(connect_req.as_bytes()).await?; + read_connect_response(&mut outbound).await?; + copy_bidirectional(inbound, &mut outbound).await?; + Ok(()) +} + +async fn read_connect_response(stream: &mut TcpStream) -> Result<()> { + let mut buf = Vec::new(); + let mut scratch = [0u8; 1024]; + let header_end = loop { + let read = stream.read(&mut scratch).await?; + if read == 0 { + n0_error::bail_any!("gateway closed before CONNECT response"); + } + buf.extend_from_slice(&scratch[..read]); + if buf.len() > MAX_CONNECT_RESPONSE { + n0_error::bail_any!("CONNECT response headers too large"); + } + if let Some(pos) = find_header_end(&buf) { + break pos; + } + }; + + let header = std::str::from_utf8(&buf[..header_end]).map_err(|_| { + n0_error::anyerr!("CONNECT response was not valid UTF-8") + })?; + let status_line = header.lines().next().unwrap_or_default(); + if !status_line.contains(" 200 ") && !status_line.starts_with("HTTP/1.1 200") { + n0_error::bail_any!("CONNECT failed: {status_line}"); + } + Ok(()) +} + +fn find_header_end(buf: &[u8]) -> Option { + buf.windows(4) + .position(|window| window == b"\r\n\r\n") + .map(|pos| pos + 4) +} diff --git a/lib/Cargo.toml b/lib/Cargo.toml index 6f11990..3c96654 100644 --- a/lib/Cargo.toml +++ 
b/lib/Cargo.toml @@ -17,6 +17,7 @@ iroh-n0des.workspace = true iroh-proxy-utils.workspace = true iroh-tickets.workspace = true iroh.workspace = true +iroh-relay.workspace = true log.workspace = true n0-error.workspace = true n0-future.workspace = true diff --git a/lib/src/config.rs b/lib/src/config.rs index 059ef69..2fa7947 100644 --- a/lib/src/config.rs +++ b/lib/src/config.rs @@ -1,12 +1,32 @@ use std::{ fs, - net::{SocketAddrV4, SocketAddrV6}, + net::{SocketAddr, SocketAddrV4, SocketAddrV6}, path::PathBuf, }; use n0_error::{Result, StackResultExt, StdResultExt}; use serde::{Deserialize, Serialize}; +#[derive(Debug, Clone, Copy, Serialize, Deserialize, Default)] +#[serde(rename_all = "snake_case")] +pub enum DiscoveryMode { + #[default] + /// Use the built-in n0des discovery defaults. + Default, + /// Use only DNS discovery (_iroh..). + Dns, + /// Use both n0des defaults and DNS discovery. + Hybrid, +} + +#[derive(Debug, Clone, Copy, Serialize, Deserialize, Default)] +#[serde(rename_all = "snake_case")] +pub enum GatewayMode { + #[default] + Reverse, + Forward, +} + #[derive(Debug, Clone, Default, Serialize, Deserialize)] #[serde(rename_all = "snake_case")] pub struct Config { @@ -21,6 +41,33 @@ pub struct Config { /// If None, defaults to a random free port, but it can be useful to specify a fixed /// port, e.g. to configure a firewall rule. pub ipv6_addr: Option, + + /// How the gateway resolves endpoint connection details. + #[serde(default)] + pub discovery_mode: DiscoveryMode, + + /// DNS origin domain used for _iroh.. lookups. + /// + /// Required when discovery_mode is `dns` or `hybrid`. + #[serde(default)] + pub dns_origin: Option, + + /// Optional DNS resolver address for discovery lookups. + /// + /// Useful for local development (e.g. 127.0.0.1:53535). + #[serde(default)] + pub dns_resolver: Option, + +} + +#[derive(Debug, Clone, Default, Serialize, Deserialize)] +#[serde(rename_all = "snake_case")] +pub struct GatewayConfig { + #[serde(flatten)] + pub common: Config, + /// Gateway operating mode for HTTP proxying. 
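+    /// `reverse` resolves the target from the Host header subdomain; `forward`
+    /// accepts CONNECT requests and reads the target from `x-iroh-endpoint-id`.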
+ #[serde(default)] + pub gateway_mode: GatewayMode, } impl Config { @@ -38,3 +85,19 @@ impl Config { Ok(()) } } + +impl GatewayConfig { + pub async fn from_file(path: PathBuf) -> Result { + let config = tokio::fs::read_to_string(path) + .await + .context("reading config file")?; + let config = serde_yml::from_str(&config).std_context("parsing config file")?; + Ok(config) + } + + pub async fn write(&self, path: PathBuf) -> Result<()> { + let data = serde_yml::to_string(self).anyerr()?; + fs::write(path, data)?; + Ok(()) + } +} diff --git a/lib/src/gateway.rs b/lib/src/gateway.rs index 96fd030..225f966 100644 --- a/lib/src/gateway.rs +++ b/lib/src/gateway.rs @@ -1,13 +1,13 @@ -use std::{io, net::SocketAddr, sync::Arc}; +use std::{io, net::SocketAddr, str::FromStr, sync::Arc}; use askama::Template; use hyper::StatusCode; -use iroh::{Endpoint, SecretKey}; +use iroh::{Endpoint, EndpointId, SecretKey}; use iroh_proxy_utils::{ - HttpOriginRequest, HttpResponse, + HttpOriginRequest, HttpProxyRequest, HttpProxyRequestKind, HttpResponse, downstream::{ - DownstreamProxy, EndpointAuthority, ExtractError, HttpProxyOpts, ProxyMode, - ReverseProxyResolver, WriteErrorResponse, + DownstreamProxy, EndpointAuthority, ExtractError, ForwardProxyResolver, HttpProxyOpts, + ProxyMode, ReverseProxyResolver, WriteErrorResponse, }, }; use n0_error::Result; @@ -23,35 +23,59 @@ use crate::{ pub async fn bind_and_serve( secret_key: SecretKey, - config: crate::config::Config, + config: crate::config::GatewayConfig, tcp_bind_addr: SocketAddr, ) -> Result<()> { let listener = TcpListener::bind(tcp_bind_addr).await?; - let endpoint = build_endpoint(secret_key, &config).await?; - let n0des_api_secret = n0des_api_secret_from_env()?; - let n0des = build_n0des_client(&endpoint, n0des_api_secret).await?; - serve(endpoint, n0des, listener).await + let endpoint = build_endpoint(secret_key, &config.common).await?; + let n0des = match config.gateway_mode { + crate::config::GatewayMode::Reverse => { + let n0des_api_secret = n0des_api_secret_from_env()?; + Some(build_n0des_client(&endpoint, n0des_api_secret).await?) + } + crate::config::GatewayMode::Forward => None, + }; + serve(endpoint, n0des, listener, config).await } pub async fn serve( endpoint: Endpoint, - n0des: Arc, + n0des: Option>, listener: TcpListener, + config: crate::config::GatewayConfig, ) -> Result<()> { let tcp_bind_addr = listener.local_addr()?; - info!(?tcp_bind_addr, endpoint_id = %endpoint.id().fmt_short(),"TCP proxy gateway started"); + info!( + ?tcp_bind_addr, + endpoint_id = %endpoint.id().fmt_short(), + gateway_mode = ?config.gateway_mode, + "TCP proxy gateway started" + ); let proxy = DownstreamProxy::new(endpoint, Default::default()); - let tickets = TicketClient::new(n0des); - let resolver = Resolver { tickets }; - let opts = HttpProxyOpts::default() - // Right now the gatewy functions as a reverse proxy, i.e. incoming requests are regular origin-form HTTP - // requests, and we resolve the destination from the host header's subdomain. - // Once envoy takes over this part, we will use [`HttpProxyOpts::forward`] instead, i.e. accept CONNECT - // requests only. 
- .reverse(resolver) - .error_response_writer(ErrorResponseWriter); - let mode = ProxyMode::Http(opts); + let mode = match config.gateway_mode { + crate::config::GatewayMode::Reverse => { + let n0des = n0des.ok_or_else(|| { + n0_error::anyerr!("n0des client is required for reverse gateway mode") + })?; + let tickets = TicketClient::new(n0des); + let resolver = Resolver { tickets }; + let opts = HttpProxyOpts::default() + // Right now the gateway functions as a reverse proxy, i.e. incoming requests are regular origin-form HTTP + // requests, and we resolve the destination from the host header's subdomain. + .reverse(resolver) + .error_response_writer(ErrorResponseWriter); + ProxyMode::Http(opts) + } + crate::config::GatewayMode::Forward => { + let resolver = ForwardResolver; + let opts = HttpProxyOpts::default() + // Forward proxy mode accepts CONNECT authority-form requests. + .forward(resolver) + .error_response_writer(ErrorResponseWriter); + ProxyMode::Http(opts) + } + }; proxy.forward_tcp_listener(listener, mode).await } @@ -70,8 +94,7 @@ impl ReverseProxyResolver for Resolver { &'a self, req: &'a HttpOriginRequest, ) -> Result { - let host = req.headers.get("host").ok_or(ExtractError::BadRequest)?; - let host = host.to_str().map_err(|_| ExtractError::BadRequest)?; + let host = req.host().ok_or(ExtractError::BadRequest)?; let codename = extract_subdomain(host).ok_or(ExtractError::NotFound)?; debug!(%codename, "extracted codename, resolving ticket..."); @@ -90,17 +113,28 @@ impl ReverseProxyResolver for Resolver { } } -// /// When operating in forward-proxy mode, i.e. when accepting CONNECT requests or requests -// /// with absolute-form targets, we only need to resolve an endpoint id from the request, -// /// because the upstream authority is already part of the original request. -// impl ForwardProxyResolver for Resolver { -// async fn destination<'a>( -// &'a self, -// req: &'a HttpProxyRequest, -// ) -> Result { -// todo!() -// } -// } +const HEADER_NODE_ID: &str = "x-iroh-endpoint-id"; + +/// When operating in forward-proxy mode we accept CONNECT requests and resolve the target +/// endpoint ID from headers injected by Envoy. +struct ForwardResolver; + +impl ForwardProxyResolver for ForwardResolver { + async fn destination<'a>( + &'a self, + req: &'a HttpProxyRequest, + ) -> Result { + if !matches!(req.kind, HttpProxyRequestKind::Tunnel { .. }) { + return Err(ExtractError::BadRequest); + } + let node_id = header_value(req, HEADER_NODE_ID).ok_or(ExtractError::BadRequest)?; + EndpointId::from_str(node_id).map_err(|_| ExtractError::BadRequest) + } +} + +fn header_value<'a>(req: &'a HttpProxyRequest, name: &str) -> Option<&'a str> { + req.headers.get(name).and_then(|value| value.to_str().ok()) +} pub(super) fn extract_subdomain(host: &str) -> Option<&str> { let host = host diff --git a/lib/src/lib.rs b/lib/src/lib.rs index 294d34f..9119f6b 100644 --- a/lib/src/lib.rs +++ b/lib/src/lib.rs @@ -1,5 +1,5 @@ mod auth; -mod config; +pub mod config; pub mod datum_cloud; pub mod gateway; mod node; @@ -9,6 +9,7 @@ mod state; pub use node::*; pub use repo::Repo; pub use state::*; +pub use config::{Config, DiscoveryMode, GatewayConfig, GatewayMode}; /// The root domain for datum connect urls to subdomain from. A proxy URL will /// be a three-word-codename subdomain off this URL. 
eg: "https://vast-gold-mine.iroh.datum.net" diff --git a/lib/src/node.rs b/lib/src/node.rs index 04f9d08..ae69631 100644 --- a/lib/src/node.rs +++ b/lib/src/node.rs @@ -6,7 +6,11 @@ use std::{ time::Duration, }; -use iroh::{Endpoint, EndpointId, SecretKey, protocol::Router}; +use iroh::{ + Endpoint, EndpointId, SecretKey, discovery::dns::DnsDiscovery, endpoint::default_relay_mode, + protocol::Router, +}; +use iroh_relay::dns::{DnsProtocol, DnsResolver}; use iroh_n0des::ApiSecret; use iroh_proxy_utils::{ALPN as IROH_HTTP_CONNECT_ALPN, HttpProxyRequest, HttpProxyRequestKind}; use iroh_proxy_utils::{ @@ -375,13 +379,37 @@ impl OutboundProxyHandle { /// Build a new iroh endpoint, applying all relevant details from Configuration /// to the base endpoint setup pub(crate) async fn build_endpoint(secret_key: SecretKey, common: &Config) -> Result { - let mut builder = Endpoint::builder().secret_key(secret_key); + let mut builder = match common.discovery_mode { + crate::config::DiscoveryMode::Dns => { + Endpoint::empty_builder(default_relay_mode()).secret_key(secret_key) + } + crate::config::DiscoveryMode::Default + | crate::config::DiscoveryMode::Hybrid => Endpoint::builder().secret_key(secret_key), + }; if let Some(addr) = common.ipv4_addr { builder = builder.bind_addr_v4(addr); } if let Some(addr) = common.ipv6_addr { builder = builder.bind_addr_v6(addr); } + match common.discovery_mode { + crate::config::DiscoveryMode::Default => {} + crate::config::DiscoveryMode::Dns | crate::config::DiscoveryMode::Hybrid => { + let origin = match &common.dns_origin { + Some(origin) => origin.clone(), + None => n0_error::bail_any!( + "dns_origin is required when discovery_mode is set to dns or hybrid" + ), + }; + if let Some(resolver_addr) = common.dns_resolver { + let resolver = DnsResolver::builder() + .with_nameserver(resolver_addr, DnsProtocol::Udp) + .build(); + builder = builder.dns_resolver(resolver); + } + builder = builder.discovery(DnsDiscovery::builder(origin)); + } + } let endpoint = builder.bind().await?; info!(id = %endpoint.id(), "iroh endpoint bound"); Ok(endpoint) diff --git a/lib/src/repo.rs b/lib/src/repo.rs index c4ff71b..7f83dce 100644 --- a/lib/src/repo.rs +++ b/lib/src/repo.rs @@ -4,7 +4,10 @@ use iroh::SecretKey; use log::{info, warn}; use n0_error::{Result, StackResultExt, StdResultExt}; -use crate::{StateWrapper, auth::Auth, config::Config, datum_cloud::AuthState, state::State}; +use crate::{ + StateWrapper, auth::Auth, config::{Config, GatewayConfig}, datum_cloud::AuthState, + state::State, +}; // Repo builds up a series of file path conventions from a root directory path. #[derive(Debug, Clone)] @@ -51,6 +54,18 @@ impl Repo { Config::from_file(config_file_path).await } + pub async fn gateway_config(&self) -> Result { + let config_file_path = self.0.join(Self::CONFIG_FILE); + if !config_file_path.exists() { + warn!("gateway config does not exist. 
creating new config"); + let cfg = GatewayConfig::default(); + cfg.write(config_file_path).await?; + return Ok(cfg); + }; + + GatewayConfig::from_file(config_file_path).await + } + pub async fn load_state(&self) -> Result { let state_file_path = self.0.join(Self::STATE_FILE); let state = if !state_file_path.exists() { diff --git a/lib/src/tests.rs b/lib/src/tests.rs index 9217c53..e8a41ee 100644 --- a/lib/src/tests.rs +++ b/lib/src/tests.rs @@ -5,10 +5,11 @@ use iroh::{Endpoint, discovery::static_provider::StaticProvider}; use n0_error::{Result, StdResultExt}; use n0_future::task::AbortOnDropHandle; use n0_tracing_test::traced_test; -use tokio::net::TcpListener; +use tokio::{io::{AsyncReadExt, AsyncWriteExt}, net::TcpListener}; use crate::{ - Advertisment, ListenNode, ProxyState, Repo, TcpProxyData, build_n0des_client, gateway, + Advertisment, GatewayConfig, GatewayMode, ListenNode, ProxyState, Repo, TcpProxyData, + build_n0des_client, gateway, }; #[derive(Default)] @@ -53,7 +54,11 @@ async fn gateway_end_to_end_to_upstream_http() -> Result<()> { let endpoint = Endpoint::bind().await?; discovery.add(&endpoint); let n0des = build_n0des_client(&endpoint, api_secret).await?; - let task = tokio::task::spawn(gateway::serve(endpoint, n0des, listener)); + let config = GatewayConfig { + gateway_mode: GatewayMode::Reverse, + ..Default::default() + }; + let task = tokio::task::spawn(gateway::serve(endpoint, Some(n0des), listener, config)); (addr, AbortOnDropHandle::new(task)) }; @@ -77,6 +82,79 @@ async fn gateway_end_to_end_to_upstream_http() -> Result<()> { Ok(()) } +#[tokio::test] +#[traced_test] +async fn gateway_forward_connect_tunnel() -> Result<()> { + let discovery = TestDiscovery::default(); + + let n0des_endpoint = Endpoint::bind().await?; + discovery.add(&n0des_endpoint); + let (api_secret, _n0des_router) = n0des_local::start(n0des_endpoint)?; + + let temp_dir = tempfile::tempdir()?; + let repo = Repo::open_or_create(temp_dir.path()).await?; + + let (origin_addr, _origin_task) = origin_server::spawn("origin").await?; + + let proxy_state = { + let data = TcpProxyData::from_host_port_str(&origin_addr.to_string())?; + let advertisment = Advertisment::new(data, None); + ProxyState::new(advertisment) + }; + + let upstream = ListenNode::with_n0des_api_secret(repo, api_secret.clone()).await?; + discovery.add(&upstream.endpoint()); + upstream.set_proxy(proxy_state).await?; + + let (gateway_addr, _gateway_task) = { + let listener = TcpListener::bind("127.0.0.1:0").await?; + let addr = listener.local_addr()?; + let endpoint = Endpoint::bind().await?; + discovery.add(&endpoint); + let config = GatewayConfig { + gateway_mode: GatewayMode::Forward, + ..Default::default() + }; + let task = tokio::task::spawn(gateway::serve(endpoint, None, listener, config)); + (addr, AbortOnDropHandle::new(task)) + }; + + let mut stream = tokio::net::TcpStream::connect(gateway_addr).await?; + let connect_request = format!( + "CONNECT {target} HTTP/1.1\r\nHost: {target}\r\nx-iroh-endpoint-id: {node_id}\r\n\r\n", + target = origin_addr, + node_id = upstream.endpoint_id(), + ); + stream.write_all(connect_request.as_bytes()).await?; + + let mut response = String::new(); + let mut buffer = [0u8; 512]; + loop { + let read = stream.read(&mut buffer).await?; + if read == 0 { + break; + } + response.push_str(&String::from_utf8_lossy(&buffer[..read])); + if response.contains("\r\n\r\n") { + break; + } + } + assert!(response.contains("200"), "unexpected CONNECT response: {response}"); + + stream + .write_all(b"GET /hello 
HTTP/1.1\r\nHost: origin\r\n\r\n") + .await?; + let mut body = vec![0u8; 1024]; + let read = stream.read(&mut body).await?; + let body = String::from_utf8_lossy(&body[..read]); + assert!( + body.contains("origin GET /hello"), + "unexpected tunneled response: {body}" + ); + + Ok(()) +} + mod origin_server { use std::{convert::Infallible, net::SocketAddr, sync::Arc}; diff --git a/scripts/try-ui-demo.sh b/scripts/try-ui-demo.sh new file mode 100755 index 0000000..cf62a71 --- /dev/null +++ b/scripts/try-ui-demo.sh @@ -0,0 +1,147 @@ +#!/usr/bin/env bash +set -euo pipefail + +REPO_ROOT="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)" +export DATUM_CONNECT_REPO="${DATUM_CONNECT_REPO:-$REPO_ROOT/.datum-connect-dev}" + +ORIGIN="${ORIGIN:-datumconnect.test}" +DNS_BIND="${DNS_BIND:-127.0.0.1:53535}" +GATEWAY_PORT="${GATEWAY_PORT:-8080}" +ORIGIN_PORT="${ORIGIN_PORT:-5173}" +TUNNEL_PORT="${TUNNEL_PORT:-8888}" +DX_PORT="${DX_PORT:-8081}" +DNS_DATA="${DNS_DATA:-$REPO_ROOT/dns-dev.yml}" + +cleanup() { + pkill -f "datum-connect dns-dev serve" || true + pkill -f "datum-connect serve" || true + pkill -f "datum-connect gateway" || true + pkill -f "datum-connect tunnel-dev" || true + pkill -f "openssl s_server -accept ${ORIGIN_PORT}" || true +} +trap cleanup EXIT + +kill_port() { + local port="$1" + local pids + pids="$(lsof -tiTCP:"$port" -sTCP:LISTEN 2>/dev/null || true)" + if [[ -n "$pids" ]]; then + kill $pids 2>/dev/null || true + fi +} + +kill_port "53535" +kill_port "${GATEWAY_PORT}" +kill_port "${ORIGIN_PORT}" +kill_port "${TUNNEL_PORT}" + +if ! command -v openssl >/dev/null 2>&1; then + echo "openssl is required for the local HTTPS origin." + exit 1 +fi + +if ! command -v dx >/dev/null 2>&1; then + echo "dx (dioxus-cli) is not installed; GUI will not be started automatically." +fi + +echo "Starting dns-dev server..." +(cd "$REPO_ROOT" && cargo run -p datum-connect -- dns-dev serve \ + --origin "$ORIGIN" \ + --bind "$DNS_BIND" \ + --data "$DNS_DATA") >/tmp/datum-connect-dns-dev.log 2>&1 & + +echo "Starting local HTTPS origin on ${ORIGIN_PORT}..." +openssl req -x509 -nodes -newkey rsa:2048 -days 1 \ + -keyout /tmp/iroh-dev.key -out /tmp/iroh-dev.crt \ + -subj "/CN=localhost" >/tmp/datum-connect-openssl.log 2>&1 +openssl s_server -accept "$ORIGIN_PORT" -cert /tmp/iroh-dev.crt -key /tmp/iroh-dev.key -www \ + >/tmp/datum-connect-origin.log 2>&1 & + +echo "Starting gateway in forward mode..." +(cd "$REPO_ROOT" && cargo run -p datum-connect -- gateway \ + --port "$GATEWAY_PORT" \ + --mode forward \ + --discovery dns \ + --dns-origin "$ORIGIN" \ + --dns-resolver "$DNS_BIND") >/tmp/datum-connect-gateway.log 2>&1 & + +echo "Starting listen node..." +(cd "$REPO_ROOT" && cargo run -p datum-connect -- serve) >/tmp/datum-connect-serve.log 2>&1 & + +echo "Waiting for listen node output..." 
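+# Poll the serve log (up to ~60 seconds) for the endpoint id and the bound UDP sockets.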
+ENDPOINT_ID="" +V4_ADDR="" +V6_ADDR="" +for _ in $(seq 1 240); do + if [[ -z "$ENDPOINT_ID" ]]; then + ENDPOINT_ID="$(grep -Eo 'listening as [0-9a-f]+' /tmp/datum-connect-serve.log | awk '{print $3}' | tail -n1 || true)" + fi + if [[ -z "$V4_ADDR" ]]; then + V4_ADDR="$(grep -Eo '0.0.0.0:[0-9]+' /tmp/datum-connect-serve.log | tail -n1 || true)" + V4_ADDR="${V4_ADDR/0.0.0.0/127.0.0.1}" + fi + if [[ -z "$V6_ADDR" ]]; then + V6_ADDR="$(grep -Eo '\\[::\\]:[0-9]+' /tmp/datum-connect-serve.log | tail -n1 || true)" + V6_ADDR="${V6_ADDR/\\[::\\]/[::1]}" + fi + if [[ -n "$ENDPOINT_ID" && -n "$V4_ADDR" ]]; then + break + fi + sleep 0.25 +done + +if [[ -z "$ENDPOINT_ID" || -z "$V4_ADDR" ]]; then + echo "Failed to detect endpoint id or bound sockets." + echo "serve log tail:" + tail -n 50 /tmp/datum-connect-serve.log || true + echo "gateway log tail:" + tail -n 20 /tmp/datum-connect-gateway.log || true + echo "dns-dev log tail:" + tail -n 20 /tmp/datum-connect-dns-dev.log || true + exit 1 +fi + +echo "Publishing TXT records via dns-dev..." +CMD=(cargo run -p datum-connect -- dns-dev upsert \ + --origin "$ORIGIN" \ + --data "$DNS_DATA" \ + --endpoint-id "$ENDPOINT_ID" \ + --addr "$V4_ADDR") +if [[ -n "$V6_ADDR" ]]; then + CMD+=(--addr "$V6_ADDR") +fi +(cd "$REPO_ROOT" && "${CMD[@]}") >/tmp/datum-connect-dns-upsert.log 2>&1 + +echo "Starting tunnel-dev entrypoint on ${TUNNEL_PORT}..." +(cd "$REPO_ROOT" && cargo run -p datum-connect -- tunnel-dev \ + --gateway "127.0.0.1:${GATEWAY_PORT}" \ + --node-id "$ENDPOINT_ID" \ + --target-host 127.0.0.1 \ + --target-port "${ORIGIN_PORT}" \ + --listen "127.0.0.1:${TUNNEL_PORT}") >/tmp/datum-connect-tunnel-dev.log 2>&1 & + +if command -v dx >/dev/null 2>&1; then + echo "Starting GUI..." + (cd "$REPO_ROOT/ui" && dx serve --platform desktop --port "$DX_PORT") >/tmp/datum-connect-ui.log 2>&1 & +fi + +cat <