// niom-turn/src/main.rs

//! Binary entry point that wires configuration, UDP listener, optional TLS listener, and allocation handling.
//! Backlog: graceful shutdown signals, structured metrics, and coordinated lifecycle management across listeners.
use std::net::SocketAddr;
use std::net::{IpAddr, Ipv4Addr, ToSocketAddrs};
use std::sync::Arc;
use std::time::Duration;
use tokio::net::UdpSocket;
use tracing::{error, info, warn};
// Use the library crate's public modules instead of local `mod` declarations.
use niom_turn::alloc::AllocationManager;
use niom_turn::alloc::AllocationOptions;
use niom_turn::auth::{AuthManager, InMemoryStore};
use niom_turn::config::{AuthOptions, Config};
use niom_turn::rate_limit::RateLimiters;
// Workaround: allow relay.* config fields to be hostnames by resolving them once at startup.
fn resolve_host_or_ip(label: &str, value: &str) -> Option<IpAddr> {
    // Fast path: the value is already a literal IPv4/IPv6 address.
    if let Ok(ip) = value.parse::<IpAddr>() {
        return Some(ip);
    }
    // Otherwise treat the value as a hostname. Append a throwaway port so
    // `to_socket_addrs` will accept it, then keep only the IP of the first
    // resolved address (resolution order is left to the OS resolver).
    let lookup = format!("{}:0", value);
    let first_ip = match lookup.to_socket_addrs() {
        Ok(addrs) => addrs.map(|sock_addr| sock_addr.ip()).next(),
        Err(e) => {
            warn!("workaround: failed to resolve {}='{}': {:?}", label, value, e);
            return None;
        }
    };
    if first_ip.is_none() {
        warn!("workaround: {}='{}' resolved to no addresses", label, value);
    }
    first_ip
}
#[tokio::main]
async fn main() -> anyhow::Result<()> {
    // Bootstrap configuration: prefer appsettings.json, otherwise rely on
    // baked-in demo defaults. Tracing is not initialised yet at this point,
    // so failures are reported on stderr.
    let (cfg, cfg_source) = match Config::load_default() {
        Ok(c) => (c, "appsettings.json".to_string()),
        Err(e) => {
            eprintln!(
                "no appsettings.json found or failed to load: {} — using defaults",
                e
            );
            // Demo defaults: UDP/TCP on 3478, TLS on 5349, one static credential.
            (
                Config {
                    server: niom_turn::config::ServerOptions {
                        bind: "0.0.0.0:3478".to_string(),
                        udp_bind: None,
                        tcp_bind: None,
                        tls_bind: "0.0.0.0:5349".to_string(),
                        enable_udp: true,
                        enable_tcp: true,
                        enable_tls: true,
                        tls_cert: None,
                        tls_key: None,
                    },
                    credentials: vec![niom_turn::config::CredentialEntry {
                        username: "testuser".into(),
                        password: "secretpassword".into(),
                    }],
                    auth: AuthOptions::default(),
                    relay: niom_turn::config::RelayOptions::default(),
                    logging: niom_turn::config::LoggingOptions::default(),
                    limits: niom_turn::config::LimitsOptions::default(),
                },
                "defaults".to_string(),
            )
        }
    };
    // Initialise tracing before any `info!`/`warn!` calls below.
    let log_directive = cfg
        .logging
        .default_directive
        .as_deref()
        .unwrap_or("warn,niom_turn=info");
    niom_turn::logging::init_tracing_with_default(log_directive);
    // Build per-server rate limiters (defaults to disabled when unset).
    let rate_limiters = Arc::new(RateLimiters::from_limits(&cfg.limits));
    info!("niom-turn starting");
    info!("config source={} realm={} creds={} rest_secret={} tls_cert={}",
        cfg_source,
        cfg.auth.realm,
        cfg.credentials.len(),
        cfg.auth.rest_secret.is_some(),
        cfg.server.tls_cert.is_some()
    );
    info!("logging.default_directive={}", log_directive);
    // Per-listener bind addresses; udp_bind/tcp_bind fall back to the shared `bind`.
    let udp_bind = cfg
        .server
        .udp_bind
        .clone()
        .unwrap_or_else(|| cfg.server.bind.clone());
    let tcp_bind = cfg
        .server
        .tcp_bind
        .clone()
        .unwrap_or_else(|| cfg.server.bind.clone());
    let tls_bind = cfg.server.tls_bind.clone();
    // Materialise the credential backend before starting network endpoints.
    // Only the username and password *length* are logged — never the secret.
    let creds = InMemoryStore::new();
    for c in cfg.credentials.iter() {
        creds.insert(&c.username, &c.password);
        info!("credential loaded user={} len={}", c.username, c.password.len());
    }
    let auth = AuthManager::new(creds.clone(), &cfg.auth);
    // Relay addresses may be hostnames; resolve them once at startup.
    let relay_bind_ip: IpAddr = cfg
        .relay
        .relay_bind_ip
        .as_deref()
        .and_then(|s| resolve_host_or_ip("relay.relay_bind_ip", s))
        .unwrap_or(IpAddr::V4(Ipv4Addr::UNSPECIFIED));
    let advertised_ip: Option<IpAddr> = cfg
        .relay
        .advertised_ip
        .as_deref()
        .and_then(|s| resolve_host_or_ip("relay.advertised_ip", s));
    let alloc_mgr = AllocationManager::new_with_options(AllocationOptions {
        relay_bind_ip,
        relay_port_min: cfg.relay.relay_port_min,
        relay_port_max: cfg.relay.relay_port_max,
        advertised_ip,
        max_allocations_per_ip: cfg.limits.max_allocations_per_ip,
        max_permissions_per_allocation: cfg.limits.max_permissions_per_allocation,
        max_channel_bindings_per_allocation: cfg.limits.max_channel_bindings_per_allocation,
    });
    // Periodically prune expired allocations so relay tasks can terminate even when idle.
    alloc_mgr.spawn_housekeeping(Duration::from_secs(5));
    // Periodically emit a compact metrics snapshot to logs.
    tokio::spawn(async move {
        loop {
            tokio::time::sleep(Duration::from_secs(30)).await;
            let snap = niom_turn::metrics::snapshot();
            info!(
                "metrics: stun={} channeldata={} streams={} auth_challenge={} auth_stale={} auth_reject={} alloc_total={} alloc_ok={} alloc_fail={} perms_added={} channels_added={} alloc_active={} rate_limited={}",
                snap.stun_messages_total,
                snap.channel_data_total,
                snap.stream_connections_total,
                snap.auth_challenge_total,
                snap.auth_stale_total,
                snap.auth_reject_total,
                snap.allocate_total,
                snap.allocate_success_total,
                snap.allocate_fail_total,
                snap.permissions_added_total,
                snap.channel_bindings_added_total,
                snap.allocations_active,
                snap.rate_limited_total
            );
        }
    });
    info!(
        "listeners: udp={} tcp={} tls={} udp_bind={} tcp_bind={} tls_bind={}",
        cfg.server.enable_udp,
        cfg.server.enable_tcp,
        cfg.server.enable_tls,
        udp_bind,
        tcp_bind,
        tls_bind
    );
    info!(
        "relay: bind_ip={} port_range={:?}-{:?} advertised_ip={:?}",
        relay_bind_ip,
        cfg.relay.relay_port_min,
        cfg.relay.relay_port_max,
        advertised_ip
    );
    // Bind the UDP socket that receives STUN/TURN traffic from WebRTC clients.
    // Parse the bind address only when UDP is actually enabled, so a disabled
    // listener configured with an unparseable address does not abort startup.
    let udp = if cfg.server.enable_udp {
        let udp_bind_addr: SocketAddr = udp_bind.parse()?;
        let udp = UdpSocket::bind(udp_bind_addr).await?;
        Some(Arc::new(udp))
    } else {
        None
    };
    // Spawn the asynchronous packet loop that handles all UDP requests.
    if let Some(udp_sock) = udp.clone() {
        let udp_clone = udp_sock.clone();
        let auth_clone = auth.clone();
        let alloc_clone = alloc_mgr.clone();
        let rl = rate_limiters.clone();
        tokio::spawn(async move {
            if let Err(e) = niom_turn::server::udp_reader_loop_with_limits(
                udp_clone,
                auth_clone,
                alloc_clone,
                rl,
            )
            .await
            {
                error!("udp loop error: {:?}", e);
            }
        });
    }
    // Start a plain TCP listener for `turn:` clients that require TCP.
    if cfg.server.enable_tcp {
        let auth_for_tcp = auth.clone();
        let alloc_for_tcp = alloc_mgr.clone();
        let tcp_bind = tcp_bind.clone();
        let rl = rate_limiters.clone();
        tokio::spawn(async move {
            if let Err(e) = niom_turn::tcp::serve_tcp_with_limits(
                &tcp_bind,
                auth_for_tcp,
                alloc_for_tcp,
                rl,
            )
            .await
            {
                error!("tcp serve failed: {:?}", e);
            }
        });
    }
    // Optionally start the TLS listener so `turns:` clients can connect via TCP/TLS.
    if cfg.server.enable_tls {
        if let (Some(cert), Some(key)) = (cfg.server.tls_cert.clone(), cfg.server.tls_key.clone()) {
            let auth_for_tls = auth.clone();
            let alloc_for_tls = alloc_mgr.clone();
            let tls_bind = tls_bind.clone();
            let rl = rate_limiters.clone();
            tokio::spawn(async move {
                if let Err(e) = niom_turn::tls::serve_tls_with_limits(
                    &tls_bind,
                    &cert,
                    &key,
                    auth_for_tls,
                    alloc_for_tls,
                    rl,
                )
                .await
                {
                    error!("tls serve failed: {:?}", e);
                }
            });
        } else {
            info!("TLS enabled but tls_cert/tls_key not configured; skipping TLS listener");
        }
    }
    // Graceful shutdown: block until Ctrl-C (SIGINT) instead of sleeping
    // forever, so the process exits cleanly when asked to stop. Spawned
    // listener tasks are detached and are torn down with the runtime.
    // NOTE(review): requires tokio's `signal` feature — confirm in Cargo.toml.
    tokio::signal::ctrl_c().await?;
    info!("shutdown signal received; exiting");
    Ok(())
}